
Merge

JesperIRL committed Dec 18, 2019
2 parents c670ebb + 4b1be3e commit e788e6dd46c36238f36cb0e1221a9c40c720b7b0
Showing with 392 additions and 94 deletions.
  1. +17 −12 src/hotspot/share/code/compiledMethod.cpp
  2. +3 −0 src/hotspot/share/code/compiledMethod.hpp
  3. +10 −3 src/hotspot/share/code/nmethod.cpp
  4. +5 −5 src/hotspot/share/gc/parallel/psParallelCompact.hpp
  5. +2 −2 src/hotspot/share/gc/shared/genCollectedHeap.cpp
  6. +3 −4 src/hotspot/share/gc/z/zRootsIterator.cpp
  7. +10 −0 src/hotspot/share/jvmci/jvmci_globals.cpp
  8. +4 −3 src/hotspot/share/opto/loopopts.cpp
  9. +16 −9 src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp
  10. +23 −1 src/hotspot/share/prims/jvmtiImpl.cpp
  11. +2 −0 src/hotspot/share/prims/jvmtiImpl.hpp
  12. +5 −0 src/hotspot/share/prims/jvmtiThreadState.cpp
  13. +1 −0 src/hotspot/share/prims/jvmtiThreadState.hpp
  14. +2 −2 src/hotspot/share/runtime/deoptimization.cpp
  15. +6 −1 src/hotspot/share/runtime/serviceThread.cpp
  16. +5 −0 src/hotspot/share/runtime/thread.cpp
  17. +0 −3 src/hotspot/share/services/management.cpp
  18. +1 −1 src/java.base/share/classes/sun/security/tools/keytool/Main.java
  19. +9 −5 src/java.base/share/classes/sun/security/util/AbstractAlgorithmConstraints.java
  20. +62 −6 src/java.base/share/classes/sun/security/util/ConstraintsParameters.java
  21. +20 −1 src/java.base/share/classes/sun/security/util/CurveDB.java
  22. +60 −21 src/java.base/share/classes/sun/security/util/DisabledAlgorithmConstraints.java
  23. +3 −3 src/java.base/share/classes/sun/security/util/LegacyAlgorithmConstraints.java
  24. +33 −4 src/java.base/share/conf/security/java.security
  25. +3 −0 src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/CommandProcessor.java
  26. +1 −1 test/hotspot/jtreg/ProblemList.txt
  27. +60 −0 test/hotspot/jtreg/compiler/loopopts/TestIrreducibleLoopWithVNNI.java
  28. +3 −3 test/hotspot/jtreg/serviceability/jvmti/CompiledMethodLoad/Zombie.java
  29. +18 −3 test/hotspot/jtreg/serviceability/sa/ClhsdbLauncher.java
  30. +5 −1 test/jdk/TEST.groups
@@ -549,6 +549,21 @@ bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
return true;
}

void CompiledMethod::run_nmethod_entry_barrier() {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) {
// We want to keep an invariant that nmethods found through iterations of a Thread's
// nmethods found in safepoints have gone through an entry barrier and are not armed.
// By calling this nmethod entry barrier, it plays along and acts
// like any other nmethod found on the stack of a thread (fewer surprises).
nmethod* nm = as_nmethod_or_null();
if (nm != NULL) {
bool alive = bs_nm->nmethod_entry_barrier(nm);
assert(alive, "should be alive");
}
}
}

void CompiledMethod::cleanup_inline_caches(bool clean_all) {
for (;;) {
ICRefillVerifier ic_refill_verifier;
@@ -557,18 +572,8 @@ void CompiledMethod::cleanup_inline_caches(bool clean_all) {
return;
}
}
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) {
// We want to keep an invariant that nmethods found through iterations of a Thread's
// nmethods found in safepoints have gone through an entry barrier and are not armed.
// By calling this nmethod entry barrier from the sweeper, it plays along and acts
// like any other nmethod found on the stack of a thread (fewer surprises).
nmethod* nm = as_nmethod_or_null();
if (nm != NULL) {
bool alive = bs_nm->nmethod_entry_barrier(nm);
assert(alive, "should be alive");
}
}
// Call this nmethod entry barrier from the sweeper.
run_nmethod_entry_barrier();
InlineCacheBuffer::refill_ic_stubs();
}
}
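
To make the refactor above concrete, here is a self-contained sketch of the "armed nmethod" idea behind nmethod_entry_barrier() (stand-in types, not HotSpot's actual BarrierSetNMethod implementation): a compiled method carries a guard; the first entry while the guard is armed heals the method's embedded oops and disarms it, and a dying method reports failure.

    // Sketch only: simplified stand-ins, not HotSpot's code.
    #include <atomic>

    struct SketchNMethod {
      std::atomic<int> guard{1};   // 1 = armed, 0 = disarmed
      bool is_alive = true;
    };

    static bool sketch_entry_barrier(SketchNMethod* nm) {
      if (nm->guard.load(std::memory_order_acquire) == 0) {
        return true;               // already disarmed: cheap fast path
      }
      if (!nm->is_alive) {
        return false;              // a dying nmethod must not be entered
      }
      // A real barrier would heal the oops embedded in the code here.
      nm->guard.store(0, std::memory_order_release);  // disarm
      return true;
    }

Factoring the barrier call into run_nmethod_entry_barrier() lets every path that keeps an nmethod alive (the sweeper here, the JVMTI event queues below) behave like a thread entering the method from a stack frame, which is exactly the invariant the comment describes.
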
@@ -366,6 +366,9 @@ class CompiledMethod : public CodeBlob {
virtual void clear_inline_caches();
void clear_ic_callsites();

// Execute nmethod barrier code, as if entering through nmethod call.
void run_nmethod_entry_barrier();

// Verify and count cached icholder relocations.
int verify_icholder_relocations();
void verify_oop_relocations();
@@ -1567,6 +1567,12 @@ void nmethod::flush_dependencies(bool delete_immediately) {
// Transfer information from compilation to jvmti
void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {

// Don't post this nmethod load event if it is already dying
// because the sweeper might already be deleting this nmethod.
if (is_not_entrant() && can_convert_to_zombie()) {
return;
}

// This is a bad time for a safepoint. We don't want
// this nmethod to get unloaded while we're queueing the event.
NoSafepointVerifier nsv;
@@ -1585,15 +1591,16 @@ void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
if (JvmtiExport::should_post_compiled_method_load()) {
// Only post unload events if load events are found.
set_load_reported();
// Keep sweeper from turning this into zombie until it is posted.
mark_as_seen_on_stack();

// If a JavaThread hasn't been passed in, let the Service thread
// (which is a real Java thread) post the event
JvmtiDeferredEvent event = JvmtiDeferredEvent::compiled_method_load_event(this);
if (state == NULL) {
// Execute any barrier code for this nmethod as if it's called, since
// keeping it alive looks like stack walking.
run_nmethod_entry_barrier();
ServiceThread::enqueue_deferred_event(&event);
} else {
// This enters the nmethod barrier outside in the caller.
state->enqueue_event(&event);
}
}
@@ -630,25 +630,25 @@ inline bool ParallelCompactData::RegionData::claim()
}

inline bool ParallelCompactData::RegionData::mark_normal() {
return Atomic::cmpxchg(&_shadow_state, UnusedRegion, NormalRegion, memory_order_relaxed) == UnusedRegion;
return Atomic::cmpxchg(&_shadow_state, UnusedRegion, NormalRegion) == UnusedRegion;
}

inline bool ParallelCompactData::RegionData::mark_shadow() {
if (_shadow_state != UnusedRegion) return false;
return Atomic::cmpxchg(&_shadow_state, UnusedRegion, ShadowRegion, memory_order_relaxed) == UnusedRegion;
return Atomic::cmpxchg(&_shadow_state, UnusedRegion, ShadowRegion) == UnusedRegion;
}

inline void ParallelCompactData::RegionData::mark_filled() {
int old = Atomic::cmpxchg(&_shadow_state, ShadowRegion, FilledShadow, memory_order_relaxed);
int old = Atomic::cmpxchg(&_shadow_state, ShadowRegion, FilledShadow);
assert(old == ShadowRegion, "Fail to mark the region as filled");
}

inline bool ParallelCompactData::RegionData::mark_copied() {
return Atomic::cmpxchg(&_shadow_state, FilledShadow, CopiedShadow, memory_order_relaxed) == FilledShadow;
return Atomic::cmpxchg(&_shadow_state, FilledShadow, CopiedShadow) == FilledShadow;
}

void ParallelCompactData::RegionData::shadow_to_normal() {
int old = Atomic::cmpxchg(&_shadow_state, ShadowRegion, NormalRegion, memory_order_relaxed);
int old = Atomic::cmpxchg(&_shadow_state, ShadowRegion, NormalRegion);
assert(old == ShadowRegion, "Fail to mark the region as finish");
}
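
The change above drops the explicit memory_order_relaxed argument, so these compare-and-swaps fall back to Atomic::cmpxchg's conservative default ordering (a fully fenced CAS). A minimal sketch of the same shadow-region state machine in standard C++, assuming that stronger default (an illustration, not the HotSpot code):

    #include <atomic>

    enum ShadowState { UnusedRegion, ShadowRegion, NormalRegion,
                       FilledShadow, CopiedShadow };

    struct RegionSketch {
      std::atomic<int> _shadow_state{UnusedRegion};

      bool mark_shadow() {
        if (_shadow_state.load() != UnusedRegion) return false;  // cheap pre-check
        int expected = UnusedRegion;
        // seq_cst CAS, mirroring the conservative default after the change
        return _shadow_state.compare_exchange_strong(expected, ShadowRegion);
      }

      bool mark_copied() {
        int expected = FilledShadow;
        return _shadow_state.compare_exchange_strong(expected, CopiedShadow);
      }
    };
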

@@ -472,7 +472,7 @@ void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t siz
HandleMark hm; // Discard invalid handles created during verification
Universe::verify("Before GC");
}
COMPILER2_PRESENT(DerivedPointerTable::clear());
COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());

if (restore_marks_for_biased_locking) {
// We perform this mark word preservation work lazily
@@ -520,7 +520,7 @@ void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t siz
rp->verify_no_references_recorded();
}

COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

gen->stat_record()->accumulated_time.stop();

@@ -44,8 +44,7 @@
#include "memory/universe.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/resolvedMethodTable.hpp"
#include "runtime/atomic.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/atomic.hpp"#include "runtime/safepoint.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
@@ -208,7 +207,7 @@ ZRootsIterator::ZRootsIterator(bool visit_jvmti_weak_export) :
_code_cache(this) {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
ZStatTimer timer(ZSubPhasePauseRootsSetup);
COMPILER2_PRESENT(DerivedPointerTable::clear());
COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());
if (ClassUnloading) {
nmethod::oops_do_marking_prologue();
} else {
@@ -225,7 +224,7 @@ ZRootsIterator::~ZRootsIterator() {
ZNMethod::oops_do_end();
}

COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());
}

void ZRootsIterator::do_universe(ZRootsIteratorClosure* cl) {
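
Both this file and genCollectedHeap.cpp above make the same substitution. For reference, these conditional-compilation wrappers are defined along the following lines in share/utilities/macros.hpp (paraphrased from memory; check the header for the exact form):

    #ifdef COMPILER2
    #define COMPILER2_PRESENT(code) code
    #else
    #define COMPILER2_PRESENT(code)
    #endif

    #if defined(COMPILER2) || INCLUDE_JVMCI
    #define COMPILER2_OR_JVMCI_PRESENT(code) code
    #else
    #define COMPILER2_OR_JVMCI_PRESENT(code)
    #endif

So after the change, DerivedPointerTable maintenance also runs in JVMCI-only builds, which need derived-pointer bookkeeping just as C2 does.
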
@@ -114,6 +114,16 @@ bool JVMCIGlobals::check_jvmci_flags_are_consistent() {
CHECK_NOT_SET(JVMCILibPath, EnableJVMCI)
CHECK_NOT_SET(JVMCILibDumpJNIConfig, EnableJVMCI)

#ifndef COMPILER2
JVMCI_FLAG_CHECKED(MaxVectorSize)
JVMCI_FLAG_CHECKED(ReduceInitialCardMarks)
JVMCI_FLAG_CHECKED(UseMultiplyToLenIntrinsic)
JVMCI_FLAG_CHECKED(UseSquareToLenIntrinsic)
JVMCI_FLAG_CHECKED(UseMulAddIntrinsic)
JVMCI_FLAG_CHECKED(UseMontgomeryMultiplyIntrinsic)
JVMCI_FLAG_CHECKED(UseMontgomerySquareIntrinsic)
#endif // !COMPILER2

#ifndef PRODUCT
#define JVMCI_CHECK4(type, name, value, doc) assert(name##checked, #name " flag not checked");
#define JVMCI_CHECK3(type, name, doc) assert(name##checked, #name " flag not checked");
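
Judging from the JVMCI_CHECK3/JVMCI_CHECK4 asserts above, the bookkeeping pattern is roughly the following (a reconstruction for illustration, not the verbatim jvmci_globals.cpp macros): each flag gets a name##checked bool, JVMCI_FLAG_CHECKED sets it, and debug builds assert that every flag was accounted for — which is why the C2-shared flags must be explicitly ticked off in builds without COMPILER2.

    // Reconstruction, not the verbatim source:
    #define JVMCI_DECLARE_CHECK4(type, name, value, doc) bool name##checked = false;
    #define JVMCI_DECLARE_CHECK3(type, name, doc)        bool name##checked = false;
    #define JVMCI_FLAG_CHECKED(name)                     name##checked = true;
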
@@ -498,9 +498,10 @@ Node *PhaseIdealLoop::convert_add_to_muladd(Node* n) {
Node * in2 = n->in(2);
if (in1->Opcode() == Op_MulI && in2->Opcode() == Op_MulI) {
IdealLoopTree* loop_n = get_loop(get_ctrl(n));
if (loop_n->_head->as_Loop()->is_valid_counted_loop() &&
Matcher::match_rule_supported(Op_MulAddS2I) &&
Matcher::match_rule_supported(Op_MulAddVS2VI)) {
if (loop_n->is_counted() &&
loop_n->_head->as_Loop()->is_valid_counted_loop() &&
Matcher::match_rule_supported(Op_MulAddVS2VI) &&
Matcher::match_rule_supported(Op_MulAddS2I)) {
Node* mul_in1 = in1->in(1);
Node* mul_in2 = in1->in(2);
Node* mul_in3 = in2->in(1);
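
The reordered guard now checks loop_n->is_counted() before calling _head->as_Loop(), presumably guarding against loop heads that are not counted loops (e.g. irreducible loops, per the new TestIrreducibleLoopWithVNNI.java test). For context, the scalar shape this optimization targets looks roughly like the following — two MulI nodes feeding one AddI — which platforms with MulAddS2I/MulAddVS2VI support can fuse into multiply-add instructions (an illustrative input program, not compiler code):

    // Two short multiplies summed into an int accumulator.
    int mul_add_s2i(const short* a, const short* b, int n) {
      int s = 0;
      for (int i = 0; i + 1 < n; i += 2) {
        // (a[i] * b[i]) + (a[i+1] * b[i+1]) can become one MulAddS2I,
        // and the vectorizer can turn runs of them into MulAddVS2VI.
        s += a[i] * b[i] + a[i + 1] * b[i + 1];
      }
      return s;
    }
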
@@ -34,6 +34,7 @@
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/vmThread.hpp"

// Support class to collect a list of the non-nmethod CodeBlobs in
@@ -222,16 +223,22 @@ jvmtiError JvmtiCodeBlobEvents::generate_dynamic_code_events(JvmtiEnv* env) {
jvmtiError JvmtiCodeBlobEvents::generate_compiled_method_load_events(JvmtiEnv* env) {
JvmtiThreadState* state = JvmtiThreadState::state_for(JavaThread::current());
{
// Walk the CodeCache notifying for live nmethods, don't release the CodeCache_lock
// because the sweeper may be running concurrently.
// Save events to the queue for posting outside the CodeCache_lock.
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// Iterate over non-profiled and profiled nmethods
NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
while(iter.next()) {
nmethod* current = iter.method();
current->post_compiled_method_load_event(state);
NoSafepointVerifier nsv; // safepoints are not safe while collecting methods to post.
{
// Walk the CodeCache notifying for live nmethods, don't release the CodeCache_lock
// because the sweeper may be running concurrently.
// Save events to the queue for posting outside the CodeCache_lock.
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// Iterate over non-profiled and profiled nmethods
NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
while(iter.next()) {
nmethod* current = iter.method();
current->post_compiled_method_load_event(state);
}
}

// Enter nmethod barrier code if present outside CodeCache_lock
state->run_nmethod_entry_barriers();
}

// Now post all the events outside the CodeCache_lock.
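
The reshuffled block follows a common two-phase shape: snapshot the interesting nmethods while holding CodeCache_lock (with safepoints forbidden so nothing turns zombie mid-walk), then run the entry barriers and post events after the lock is dropped. A generic self-contained sketch of that shape (simplified types, not the JVMTI code):

    #include <mutex>
    #include <vector>

    struct Blob { Blob* next; };

    std::mutex list_lock;          // stand-in for CodeCache_lock
    Blob* blob_list_head = nullptr;

    void process_blobs() {
      std::vector<Blob*> snapshot;
      {
        std::lock_guard<std::mutex> guard(list_lock);
        // Phase 1: collect under the lock; keep this section short and
        // free of anything that could block or yield.
        for (Blob* b = blob_list_head; b != nullptr; b = b->next)
          snapshot.push_back(b);
      }
      // Phase 2: heavier per-item work (entry barriers, event posting)
      // runs without the lock held.
      for (Blob* b : snapshot) { (void)b; /* post event for b */ }
    }
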
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/nmethod.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
@@ -992,6 +993,12 @@ void JvmtiDeferredEvent::post_compiled_method_load_event(JvmtiEnv* env) {
JvmtiExport::post_compiled_method_load(env, nm);
}

void JvmtiDeferredEvent::run_nmethod_entry_barriers() {
if (_type == TYPE_COMPILED_METHOD_LOAD) {
_event_data.compiled_method_load->run_nmethod_entry_barrier();
}
}


// Keep the nmethod for compiled_method_load from being unloaded.
void JvmtiDeferredEvent::oops_do(OopClosure* f, CodeBlobClosure* cf) {
@@ -1008,8 +1015,16 @@ void JvmtiDeferredEvent::nmethods_do(CodeBlobClosure* cf) {
}
}


bool JvmtiDeferredEventQueue::has_events() {
return _queue_head != NULL;
// We save the queued events before the live phase and post them when it starts.
// This code could skip saving the events on the queue before the live
// phase and ignore them, but this would change how we do things now.
// Starting the service thread earlier causes this to be called before the live phase begins.
// The events on the queue should all be posted after the live phase so this is an
// ok check. Before the live phase, DynamicCodeGenerated events are posted directly.
// If we add other types of events to the deferred queue, this could get ugly.
return JvmtiEnvBase::get_phase() == JVMTI_PHASE_LIVE && _queue_head != NULL;
}

void JvmtiDeferredEventQueue::enqueue(JvmtiDeferredEvent event) {
@@ -1057,6 +1072,13 @@ void JvmtiDeferredEventQueue::post(JvmtiEnv* env) {
}
}

void JvmtiDeferredEventQueue::run_nmethod_entry_barriers() {
for(QueueNode* node = _queue_head; node != NULL; node = node->next()) {
node->event().run_nmethod_entry_barriers();
}
}


void JvmtiDeferredEventQueue::oops_do(OopClosure* f, CodeBlobClosure* cf) {
for(QueueNode* node = _queue_head; node != NULL; node = node->next()) {
node->event().oops_do(f, cf);
@@ -481,6 +481,7 @@ class JvmtiDeferredEvent {
// Actually posts the event.
void post() NOT_JVMTI_RETURN;
void post_compiled_method_load_event(JvmtiEnv* env) NOT_JVMTI_RETURN;
void run_nmethod_entry_barriers() NOT_JVMTI_RETURN;
// Sweeper support to keep nmethods from being zombied while in the queue.
void nmethods_do(CodeBlobClosure* cf) NOT_JVMTI_RETURN;
// GC support to keep nmethod from being unloaded while in the queue.
@@ -522,6 +523,7 @@ class JvmtiDeferredEventQueue : public CHeapObj<mtInternal> {
// Post all events in the queue for the current Jvmti environment
void post(JvmtiEnv* env) NOT_JVMTI_RETURN;
void enqueue(JvmtiDeferredEvent event) NOT_JVMTI_RETURN;
void run_nmethod_entry_barriers();

// Sweeper support to keep nmethods from being zombied while in the queue.
void nmethods_do(CodeBlobClosure* cf) NOT_JVMTI_RETURN;
@@ -432,3 +432,8 @@ void JvmtiThreadState::post_events(JvmtiEnv* env) {
}
}

void JvmtiThreadState::run_nmethod_entry_barriers() {
if (_jvmti_event_queue != NULL) {
_jvmti_event_queue->run_nmethod_entry_barriers();
}
}
@@ -398,6 +398,7 @@ class JvmtiThreadState : public CHeapObj<mtInternal> {
// Thread local event queue, which doesn't require taking the Service_lock.
void enqueue_event(JvmtiDeferredEvent* event);
void post_events(JvmtiEnv* env);
void run_nmethod_entry_barriers();
};

class RedefineVerifyMark : public StackObj {
@@ -302,7 +302,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread

// Reallocate the non-escaping objects and restore their fields. Then
// relock objects if synchronization on them was eliminated.
if (jvmci_enabled || (DoEscapeAnalysis && EliminateAllocations)) {
if (jvmci_enabled COMPILER2_PRESENT( || (DoEscapeAnalysis && EliminateAllocations) )) {
realloc_failures = eliminate_allocations(thread, exec_mode, cm, deoptee, map, chunk);
}
#endif // COMPILER2_OR_JVMCI
@@ -318,7 +318,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
NoSafepointVerifier no_safepoint;

#if COMPILER2_OR_JVMCI
if (jvmci_enabled || ((DoEscapeAnalysis || EliminateNestedLocks) && EliminateLocks)) {
if (jvmci_enabled COMPILER2_PRESENT( || ((DoEscapeAnalysis || EliminateNestedLocks) && EliminateLocks) )) {
eliminate_locks(thread, chunk, realloc_failures);
}
#endif // COMPILER2_OR_JVMCI
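
Spelled out with the COMPILER2_PRESENT definition sketched earlier, the wrapped condition expands differently per build, so a JVMCI-only build never evaluates the C2-only escape-analysis flags:

    // With COMPILER2 defined, the first condition expands to:
    //   if (jvmci_enabled || (DoEscapeAnalysis && EliminateAllocations)) { ... }
    // Without COMPILER2 (JVMCI-only build), it collapses to:
    //   if (jvmci_enabled) { ... }
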
@@ -48,7 +48,8 @@
ServiceThread* ServiceThread::_instance = NULL;
JvmtiDeferredEvent* ServiceThread::_jvmti_event = NULL;
// The service thread has its own static deferred event queue.
// Events can be posted before the service thread is created.
// Events can be posted before JVMTI vm_start, so it's too early to call JvmtiThreadState::state_for
// to add this field to the per-JavaThread event queue. TODO: fix this sometime later
JvmtiDeferredEventQueue ServiceThread::_jvmti_service_queue;

void ServiceThread::initialize() {
@@ -195,6 +196,10 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {

void ServiceThread::enqueue_deferred_event(JvmtiDeferredEvent* event) {
MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
// If you enqueue events before the service thread runs, gc and the sweeper
// cannot keep the nmethod alive. This could be restricted to compiled method
// load and unload events, if we wanted to be picky.
assert(_instance != NULL, "cannot enqueue events before the service thread runs");
_jvmti_service_queue.enqueue(*event);
Service_lock->notify_all();
}
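
The enqueue path is a standard monitor handoff: append under Service_lock, then notify the service thread, which drains the queue in its main loop. A minimal standard-C++ sketch of the same handoff (stand-in event type, not the HotSpot code):

    #include <condition_variable>
    #include <mutex>
    #include <queue>

    std::mutex service_lock;                  // stand-in for Service_lock
    std::condition_variable service_cv;
    std::queue<int> deferred_events;          // stand-in for the JVMTI queue

    void enqueue_deferred(int event) {
      std::lock_guard<std::mutex> guard(service_lock);
      deferred_events.push(event);
      service_cv.notify_all();                // mirrors Service_lock->notify_all()
    }

    void service_loop() {
      std::unique_lock<std::mutex> lk(service_lock);
      for (;;) {
        service_cv.wait(lk, [] { return !deferred_events.empty(); });
        int ev = deferred_events.front();
        deferred_events.pop();
        lk.unlock();
        // ... post ev outside the lock ...
        (void)ev;
        lk.lock();
      }
    }
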
@@ -88,6 +88,7 @@
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
@@ -3995,6 +3996,10 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
Chunk::start_chunk_pool_cleaner_task();
}

// Start the service thread
// The service thread enqueues JVMTI deferred events and does various hashtable
// and other cleanups. Needs to start before the compilers start posting events.
ServiceThread::initialize();

// initialize compiler(s)
#if defined(COMPILER1) || COMPILER2_OR_JVMCI
@@ -46,7 +46,6 @@
#include "runtime/jniHandles.inline.hpp"
#include "runtime/notificationThread.hpp"
#include "runtime/os.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "services/classLoadingService.hpp"
@@ -147,8 +146,6 @@ void Management::init() {
}

void Management::initialize(TRAPS) {
// Start the service thread
ServiceThread::initialize();
if (UseNotificationThread) {
NotificationThread::initialize();
}
