Skip to content
Permalink
Browse files

Initial scoped locals implementation

  • Loading branch information
Andrew Haley
Andrew Haley committed Feb 14, 2020
1 parent 07e3880 commit adb3efeaf76721c86191138300517eeb8d6856d4
@@ -170,6 +170,8 @@ JVM_RegisterContinuationMethods
JVM_RegisterSignal
JVM_ReleaseUTF
JVM_ResumeThread
JVM_ScopedCache
JVM_SetScopedCache
JVM_SetArrayElement
JVM_SetClassSigners
JVM_SetNativeThreadName
@@ -152,6 +152,7 @@ bool Compiler::is_intrinsic_supported(const methodHandle& method) {
case vmIntrinsics::_isInstance:
case vmIntrinsics::_isPrimitive:
case vmIntrinsics::_currentThread:
case vmIntrinsics::_scopedCache:
case vmIntrinsics::_dabs:
case vmIntrinsics::_dsqrt:
case vmIntrinsics::_dsin:
@@ -1314,6 +1314,13 @@ void LIRGenerator::do_currentThread(Intrinsic* x) {
}


// C1 intrinsic for Thread.scopedCache(): loads the scoped-value cache
// (an Object[] kept as a raw oop field on the current JavaThread) into
// the intrinsic's result operand. The value is read straight off the
// thread pointer, so no receiver and no null check are required.
void LIRGenerator::do_scopedCache(Intrinsic* x) {
// The Java-side method is a static, zero-argument getter.
assert(x->number_of_arguments() == 0, "wrong type");
LIR_Opr reg = rlock_result(x);
// Load JavaThread::_scopedCache as a T_OBJECT from
// [thread + scopedCache_offset]; move_wide tolerates large displacements.
__ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::scopedCache_offset()), T_OBJECT), reg);
}


void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
assert(x->number_of_arguments() == 1, "wrong type");
LIRItem receiver(x->argument_at(0), this);
@@ -3038,6 +3045,7 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
case vmIntrinsics::_isPrimitive: do_isPrimitive(x); break;
case vmIntrinsics::_getClass: do_getClass(x); break;
case vmIntrinsics::_currentThread: do_currentThread(x); break;
case vmIntrinsics::_scopedCache: do_scopedCache(x); break;

case vmIntrinsics::_dlog: // fall through
case vmIntrinsics::_dlog10: // fall through
@@ -253,6 +253,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void do_isPrimitive(Intrinsic* x);
void do_getClass(Intrinsic* x);
void do_currentThread(Intrinsic* x);
void do_scopedCache(Intrinsic* x);
void do_FmaIntrinsic(Intrinsic* x);
void do_MathIntrinsic(Intrinsic* x);
void do_LibmIntrinsic(Intrinsic* x);
@@ -4511,6 +4511,8 @@ class UnsafeConstantsFixup : public FieldClosure {
mirror->bool_field_put(fd->offset(), _use_unaligned_access);
} else if (fd->name() == vmSymbols::data_cache_line_flush_size_name()) {
mirror->int_field_put(fd->offset(), _data_cache_line_flush_size);
} else if (fd->name() == vmSymbols::scoped_cache_shift_name()) {
mirror->int_field_put(fd->offset(), ScopedCacheSize ? exact_log2(ScopedCacheSize) : -1);
} else {
assert(false, "unexpected UnsafeConstants field");
}
@@ -363,6 +363,8 @@ bool vmIntrinsics::preserves_state(vmIntrinsics::ID id) {
case vmIntrinsics::_getClass:
case vmIntrinsics::_isInstance:
case vmIntrinsics::_currentThread:
case vmIntrinsics::_scopedCache:
case vmIntrinsics::_setScopedCache:
case vmIntrinsics::_dabs:
case vmIntrinsics::_fabs:
case vmIntrinsics::_iabs:
@@ -411,6 +413,8 @@ bool vmIntrinsics::can_trap(vmIntrinsics::ID id) {
case vmIntrinsics::_doubleToRawLongBits:
case vmIntrinsics::_longBitsToDouble:
case vmIntrinsics::_currentThread:
case vmIntrinsics::_scopedCache:
case vmIntrinsics::_setScopedCache:
case vmIntrinsics::_dabs:
case vmIntrinsics::_fabs:
case vmIntrinsics::_iabs:
@@ -578,6 +582,8 @@ bool vmIntrinsics::is_disabled_by_flags(vmIntrinsics::ID id) {
case vmIntrinsics::_currentThread:
if (!InlineThreadNatives) return true;
break;
case vmIntrinsics::_scopedCache:
case vmIntrinsics::_setScopedCache:
case vmIntrinsics::_floatToRawIntBits:
case vmIntrinsics::_intBitsToFloat:
case vmIntrinsics::_doubleToRawLongBits:
@@ -500,6 +500,7 @@
template(big_endian_name, "BIG_ENDIAN") \
template(use_unaligned_access_name, "UNALIGNED_ACCESS") \
template(data_cache_line_flush_size_name, "DATA_CACHE_LINE_FLUSH_SIZE") \
template(scoped_cache_shift_name, "SCOPED_CACHE_SHIFT") \
\
/* name symbols needed by intrinsics */ \
VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, template, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \
@@ -915,8 +916,14 @@
do_name( arraycopy_name, "arraycopy") \
do_signature(arraycopy_signature, "(Ljava/lang/Object;ILjava/lang/Object;II)V") \
do_intrinsic(_currentThread, java_lang_Thread, currentThread_name, currentThread_signature, F_S) \
do_intrinsic(_scopedCache, java_lang_Thread, scopedCache_name, scopedCache_signature, F_S) \
do_intrinsic(_setScopedCache, java_lang_Thread, setScopedCache_name, setScopedCache_signature, F_S) \
do_name( currentThread_name, "currentThread0") \
do_name( scopedCache_name, "scopedCache") \
do_name( setScopedCache_name, "setScopedCache") \
do_signature(currentThread_signature, "()Ljava/lang/Thread;") \
do_signature(scopedCache_signature, "()[Ljava/lang/Object;") \
do_signature(setScopedCache_signature, "([Ljava/lang/Object;)V") \
\
/* reflective intrinsics, for java/lang/Class, etc. */ \
do_intrinsic(_isAssignableFrom, java_lang_Class, isAssignableFrom_name, class_boolean_signature, F_RN) \
@@ -213,6 +213,12 @@ JVM_CallStackWalk(JNIEnv *env, jobject stackStream, jlong mode,
jint skip_frames, jobject contScope, jobject cont,
jint frame_count, jint start_index, jobjectArray frames);

JNIEXPORT jobject JNICALL
JVM_ScopedCache(JNIEnv *env, jclass threadClass);

JNIEXPORT void JNICALL
JVM_SetScopedCache(JNIEnv *env, jclass threadClass, jobject theCache);

JNIEXPORT jint JNICALL
JVM_MoreStackWalk(JNIEnv *env, jobject stackStream, jlong mode, jlong anchor,
jint frame_count, jint start_index,
@@ -582,6 +582,8 @@ bool C2Compiler::is_intrinsic_supported(const methodHandle& method, bool is_virt
case vmIntrinsics::_storeFence:
case vmIntrinsics::_fullFence:
case vmIntrinsics::_currentThread:
case vmIntrinsics::_scopedCache:
case vmIntrinsics::_setScopedCache:
#ifdef JFR_HAVE_INTRINSICS
case vmIntrinsics::_counterTime:
case vmIntrinsics::_getClassId:
@@ -258,6 +258,8 @@ class LibraryCallKit : public GraphKit {
bool inline_unsafe_writebackSync0(bool is_pre);
bool inline_unsafe_copyMemory();
bool inline_native_currentThread();
bool inline_native_scopedCache();
bool inline_native_setScopedCache();

bool inline_native_time_funcs(address method, const char* funcName);
#ifdef JFR_HAVE_INTRINSICS
@@ -758,6 +760,9 @@ bool LibraryCallKit::try_to_inline(int predicate) {

case vmIntrinsics::_currentThread: return inline_native_currentThread();

case vmIntrinsics::_scopedCache: return inline_native_scopedCache();
case vmIntrinsics::_setScopedCache: return inline_native_setScopedCache();

#ifdef JFR_HAVE_INTRINSICS
case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), "counterTime");
case vmIntrinsics::_getClassId: return inline_native_classID();
@@ -3045,6 +3050,40 @@ bool LibraryCallKit::inline_native_currentThread() {
return true;
}

//------------------------inline_native_scopedCache------------------
// C2 intrinsic for Thread.scopedCache(): emits a raw load of the
// Object[] scoped-value cache from the current JavaThread and makes it
// the call's result. Always succeeds (returns true).
bool LibraryCallKit::inline_native_scopedCache() {
// Build the compile-time type of the result: Object[] with Object elements.
ciKlass *objects_klass = ciObjArrayKlass::make(env()->Object_klass());
const TypeOopPtr *etype = TypeOopPtr::make_from_klass(env()->Object_klass());

// It might be nice to eliminate the bounds check on the cache array
// by replacing TypeInt::POS here with
// TypeInt::make(ScopedCacheSize*2), but this causes a performance
// regression in some test cases.
const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
bool xk = etype->klass_is_exact();

// Because we create the scoped cache lazily we have to make the
// type of the result BotPTR.
const Type* objects_type = TypeAryPtr::make(TypePtr::BotPTR, arr0, objects_klass, xk, 0);
// Address of JavaThread::_scopedCache: thread-local base + field offset.
// top()/*!oop*/ marks the base as a raw (non-heap) pointer.
Node* thread = _gvn.transform(new ThreadLocalNode());
Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedCache_offset()));
// Plain unordered load: the field is only written by the owning thread.
Node* threadObj = make_load(NULL, p, objects_type, T_OBJECT, MemNode::unordered);
set_result(threadObj);

return true;
}

//------------------------inline_native_setScopedCache------------------
// C2 intrinsic for Thread.setScopedCache(Object[]): stores the argument
// into JavaThread::_scopedCache for the current thread. Always succeeds.
// NOTE(review): this is a plain store with no GC barrier — presumably
// safe because the field lives on the JavaThread (not in the heap) and
// is scanned by JavaThread::oops_do; confirm against the collector's
// requirements for thread-root stores.
bool LibraryCallKit::inline_native_setScopedCache() {
// Incoming Object[] to install as the new cache (may be null).
Node* arr = argument(0);
// Raw address of the thread-local field, as in inline_native_scopedCache.
Node* thread = _gvn.transform(new ThreadLocalNode());
Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::scopedCache_offset()));
const TypePtr *adr_type = _gvn.type(p)->isa_ptr();
// Unordered store: only the owning thread reads/writes this field.
store_to_memory(control(), p, arr, T_OBJECT, adr_type, MemNode::unordered);

return true;
}

//---------------------------load_mirror_from_klass----------------------------
// Given a klass oop, load its java mirror (a java.lang.Class oop).
Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
@@ -3124,6 +3124,25 @@ JVM_ENTRY(void, JVM_Interrupt(JNIEnv* env, jobject jthread))
}
JVM_END

// Native fallback for Thread.scopedCache() when the intrinsic is not
// used: returns the current thread's scoped-value cache (an Object[]
// of length ScopedCacheSize * 2), or NULL if none has been installed.
// `thread` is the current JavaThread, bound by the JVM_ENTRY macro.
JVM_ENTRY(jobject, JVM_ScopedCache(JNIEnv* env, jclass threadClass))
JVMWrapper("JVM_ScopedCache");
oop theCache = thread->_scopedCache;
if (theCache) {
arrayOop objs = arrayOop(theCache);
// Invariant check only (debug builds): the cache, when present,
// always has 2 slots per entry.
assert(objs->length() == ScopedCacheSize * 2, "wrong length");
}
// Wrap in a local JNI handle; a NULL oop yields a NULL jobject.
return JNIHandles::make_local(env, theCache);
JVM_END

// Native fallback for Thread.setScopedCache(Object[]): installs (or
// clears, when theCache is NULL) the current thread's scoped-value
// cache. `thread` is the current JavaThread, bound by JVM_ENTRY.
JVM_ENTRY(void, JVM_SetScopedCache(JNIEnv* env, jclass threadClass,
jobject theCache))
JVMWrapper("JVM_SetScopedCache");
arrayOop objs = arrayOop(JNIHandles::resolve(theCache));
if (objs != NULL) {
// Invariant check only (debug builds): 2 slots per cache entry.
assert(objs->length() == ScopedCacheSize * 2, "wrong length");
}
// Direct field store; the field is a thread root scanned by oops_do.
thread->_scopedCache = objs;
JVM_END

// Return true iff the current thread has locked the object passed in

@@ -2483,6 +2483,10 @@ const size_t minimumSymbolTableSize = 1024;
product(bool, UseContinuationStreamingCopy, false, \
"Use streaming memory when copying continuation stack chunks") \
\
product(intx, ScopedCacheSize, 16, \
"Size of the cache for scoped values") \
range(0, max_intx) \
\
experimental(ccstr, AllocateOldGenAt, NULL, \
"Path to the directoy where a temporary file will be " \
"created to use as the backing store for old generation." \
@@ -1733,6 +1733,8 @@ void JavaThread::initialize() {

_class_to_be_initialized = NULL;

_scopedCache = NULL;

pd_initialize();
}

@@ -3032,6 +3034,8 @@ void JavaThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
if (jvmti_thread_state() != NULL) {
jvmti_thread_state()->oops_do(f, cf);
}

f->do_oop(&_scopedCache);
}

#ifdef ASSERT
@@ -5120,3 +5124,9 @@ void Threads::verify() {
VMThread* thread = VMThread::vm_thread();
if (thread != NULL) thread->verify();
}

// Lazily create this thread's scoped-value cache as an Object[] of
// `count` elements. A non-positive count is a no-op, leaving the
// cache unallocated (NULL).
void JavaThread::allocate_scoped_hash_table(int count) {
  if (count <= 0) {
    return;
  }
  _scopedCache = oopFactory::new_objectArray(count, this);
}
@@ -1228,6 +1228,13 @@ class JavaThread: public Thread {
friend class ThreadWaitTransition;
friend class VM_Exit;

public:

oop _scopedCache;
jlong _scoped_hash_table_shift;

void allocate_scoped_hash_table(int count);

void initialize(); // Initialized the instance variables

public:
@@ -1784,6 +1791,8 @@ class JavaThread: public Thread {
void clr_do_not_unlock(void) { _do_not_unlock_if_synchronized = false; }
bool do_not_unlock(void) { return _do_not_unlock_if_synchronized; }

static ByteSize scopedCache_offset() { return byte_offset_of(JavaThread, _scopedCache); }

// For assembly stub generation
static ByteSize threadObj_offset() { return byte_offset_of(JavaThread, _threadObj); }
static ByteSize jni_environment_offset() { return byte_offset_of(JavaThread, _jni_environment); }
@@ -160,6 +160,8 @@ private static Thread currentCarrierThread() {
private int stackWatermark;
private int refStackWatermark;

private Object[] scopedCache;

// private long[] nmethods = null; // grows up
// private int numNmethods = 0;

@@ -292,9 +294,12 @@ private Continuation innermost() {
/**
 * Marks this continuation as mounted on the carrier thread and
 * reinstalls the scoped-value cache that was saved at the last
 * unmount.
 *
 * @throws IllegalStateException if the continuation is already mounted
 */
private void mount() {
    // Atomically transition mounted: false -> true; failure means a
    // concurrent or repeated mount.
    boolean claimed = compareAndSetMounted(false, true);
    if (!claimed) {
        throw new IllegalStateException("Mounted!!!!");
    }
    // Restore this continuation's scoped-value cache on the thread.
    Thread.setScopedCache(scopedCache);
}

/**
 * Detaches this continuation from the carrier thread: captures the
 * thread's current scoped-value cache (so mount() can restore it),
 * clears it from the thread, then drops the mounted flag.
 */
private void unmount() {
    // Order matters: save the cache before clearing it, and only then
    // publish the unmounted state.
    this.scopedCache = Thread.scopedCache();
    Thread.setScopedCache(null);
    setMounted(false);
}

0 comments on commit adb3efe

Please sign in to comment.
You can’t perform that action at this time.