8249837: Avoid direct or implicit Thread::current() calls when we already have a current thread variable

Add current thread OR remove unneeded HandleMark

Reviewed-by: kvn, dholmes
Coleen Phillimore committed Jul 30, 2020
1 parent 79f02a6 · commit 9798a0846bda983afce55adf52a3b054cd0ffb4d
Showing with 108 additions and 143 deletions.
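
The pattern behind the change, for readers skimming the diff below: HotSpot's HandleMark and ResourceMark no-argument constructors resolve Thread::current() internally, so constructing them in code that already has the current thread (a THREAD/JavaThread* parameter or a cached local) repeats a thread-local lookup for no benefit. The following sketch is not HotSpot code; it uses a hypothetical ScopedMark class, under the assumption that the redundant cost is the hidden thread lookup, to show the before/after shape of the change.

```cpp
#include <iostream>

// Toy stand-in for HotSpot's Thread. current() models Thread::current(),
// which resolves the calling thread from thread-local storage.
struct Thread {
  static Thread* current() {
    thread_local Thread self;
    return &self;
  }
};

// Toy stand-in for HandleMark/ResourceMark: a scoped object that must know
// which thread's resources it cleans up when it goes out of scope.
class ScopedMark {
 public:
  ScopedMark() : _thread(Thread::current()) {}              // old style: hidden lookup
  explicit ScopedMark(Thread* thread) : _thread(thread) {}  // new style: caller supplies it
 private:
  Thread* _thread;
};

// Before: each mark performs its own implicit Thread::current() call.
void old_style() {
  ScopedMark hm;
  ScopedMark rm;
}

// After: look the thread up once and pass it to every mark.
void new_style() {
  Thread* current = Thread::current();
  ScopedMark hm(current);
  ScopedMark rm(current);
}

int main() {
  old_style();
  new_style();
  std::cout << "both styles construct the same marks\n";
  return 0;
}
```

In the diff itself this shows up either as `HandleMark hm;` becoming `HandleMark hm(THREAD);` or `HandleMark hm(current_thread);`, or as the HandleMark simply being removed where it was not needed.
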
  1. +2 −2 src/hotspot/share/ci/ciReplay.cpp
  2. +2 −2 src/hotspot/share/classfile/classLoaderDataGraph.cpp
  3. +2 −2 src/hotspot/share/classfile/klassFactory.cpp
  4. +1 −1 src/hotspot/share/classfile/systemDictionaryShared.cpp
  5. +3 −5 src/hotspot/share/code/nmethod.cpp
  6. +4 −2 src/hotspot/share/code/scopeDesc.cpp
  7. +1 −2 src/hotspot/share/code/vtableStubs.cpp
  8. +0 −2 src/hotspot/share/gc/g1/g1CollectedHeap.cpp
  9. +0 −4 src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
  10. +1 −1 src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp
  11. +0 −1 src/hotspot/share/gc/g1/g1FullCollector.cpp
  12. +0 −2 src/hotspot/share/gc/g1/g1HeapVerifier.cpp
  13. +0 −3 src/hotspot/share/gc/parallel/psParallelCompact.cpp
  14. +0 −3 src/hotspot/share/gc/parallel/psScavenge.cpp
  15. +6 −5 src/hotspot/share/gc/shared/collectedHeap.cpp
  16. +0 −1 src/hotspot/share/gc/shared/gcVMOperations.cpp
  17. +0 −4 src/hotspot/share/gc/shared/genCollectedHeap.cpp
  18. +3 −2 src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
  19. +0 −5 src/hotspot/share/interpreter/interpreterRuntime.cpp
  20. +0 −1 src/hotspot/share/interpreter/oopMapCache.cpp
  21. +1 −1 src/hotspot/share/jfr/jni/jfrJavaSupport.cpp
  22. +3 −2 src/hotspot/share/jfr/recorder/jfrRecorder.cpp
  23. +2 −2 src/hotspot/share/jvmci/jvmciCodeInstaller.cpp
  24. +2 −2 src/hotspot/share/jvmci/jvmciCompiler.cpp
  25. +7 −7 src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
  26. +1 −1 src/hotspot/share/jvmci/jvmciEnv.cpp
  27. +4 −4 src/hotspot/share/jvmci/jvmciRuntime.cpp
  28. +0 −1 src/hotspot/share/memory/dynamicArchive.cpp
  29. +4 −2 src/hotspot/share/memory/universe.cpp
  30. +1 −2 src/hotspot/share/oops/klassVtable.cpp
  31. +2 −2 src/hotspot/share/prims/jni.cpp
  32. +13 −15 src/hotspot/share/prims/jvmtiEnv.cpp
  33. +9 −7 src/hotspot/share/prims/jvmtiEnvBase.cpp
  34. +1 −1 src/hotspot/share/prims/jvmtiExport.cpp
  35. +3 −2 src/hotspot/share/prims/jvmtiImpl.cpp
  36. +0 −2 src/hotspot/share/prims/jvmtiTagMap.cpp
  37. +2 −2 src/hotspot/share/prims/whitebox.cpp
  38. +7 −5 src/hotspot/share/runtime/deoptimization.cpp
  39. +1 −1 src/hotspot/share/runtime/handles.cpp
  40. +1 −2 src/hotspot/share/runtime/handles.hpp
  41. +0 −4 src/hotspot/share/runtime/handles.inline.hpp
  42. +0 −1 src/hotspot/share/runtime/init.cpp
  43. +0 −2 src/hotspot/share/runtime/java.cpp
  44. +8 −6 src/hotspot/share/runtime/thread.cpp
  45. +3 −3 src/hotspot/share/runtime/vframe.cpp
  46. +4 −2 src/hotspot/share/runtime/vframeArray.cpp
  47. +1 −1 src/hotspot/share/services/attachListener.cpp
  48. +0 −7 src/hotspot/share/services/heapDumper.cpp
  49. +3 −2 src/hotspot/share/services/threadService.cpp
  50. +0 −2 src/hotspot/share/utilities/debug.cpp
@@ -1108,8 +1108,8 @@ void* ciReplay::load_inline_data(ciMethod* method, int entry_bci, int comp_level
}

int ciReplay::replay_impl(TRAPS) {
- HandleMark hm;
- ResourceMark rm;
+ HandleMark hm(THREAD);
+ ResourceMark rm(THREAD);

if (ReplaySuppressInitializers > 2) {
// ReplaySuppressInitializers > 2 means that we want to allow
@@ -306,14 +306,14 @@ LockedClassesDo::~LockedClassesDo() {
// unloading can remove entries concurrently soon.
class ClassLoaderDataGraphIterator : public StackObj {
ClassLoaderData* _next;
+ Thread* _thread;
HandleMark _hm; // clean up handles when this is done.
Handle _holder;
- Thread* _thread;
NoSafepointVerifier _nsv; // No safepoints allowed in this scope
// unless verifying at a safepoint.

public:
- ClassLoaderDataGraphIterator() : _next(ClassLoaderDataGraph::_head) {
-   _thread = Thread::current();
+ ClassLoaderDataGraphIterator() : _next(ClassLoaderDataGraph::_head), _thread(Thread::current()), _hm(_thread) {
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
}
@@ -173,8 +173,8 @@ InstanceKlass* KlassFactory::create_from_stream(ClassFileStream* stream,
assert(loader_data != NULL, "invariant");
assert(THREAD->is_Java_thread(), "must be a JavaThread");

- ResourceMark rm;
- HandleMark hm;
+ ResourceMark rm(THREAD);
+ HandleMark hm(THREAD);

JvmtiCachedClassFileData* cached_class_file = NULL;

@@ -1773,7 +1773,7 @@ bool SystemDictionaryShared::check_linking_constraints(InstanceKlass* klass, TRA
RunTimeSharedClassInfo* info = RunTimeSharedClassInfo::get_for(klass);
assert(info != NULL, "Sanity");
if (info->_num_loader_constraints > 0) {
- HandleMark hm;
+ HandleMark hm(THREAD);
for (int i = 0; i < info->_num_loader_constraints; i++) {
RunTimeSharedClassInfo::RTLoaderConstraint* lc = info->loader_constraint_at(i);
Symbol* name = lc->constraint_name();
@@ -873,7 +873,6 @@ void nmethod::log_identity(xmlStream* log) const {
void nmethod::log_new_nmethod() const {
if (LogCompilation && xtty != NULL) {
ttyLocker ttyl;
- HandleMark hm;
xtty->begin_elem("nmethod");
log_identity(xtty);
xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
@@ -931,7 +930,6 @@ void nmethod::print_nmethod(bool printmethod) {
// Print the header part, then print the requested information.
// This is both handled in decode2().
if (printmethod) {
- HandleMark hm;
ResourceMark m;
if (is_compiled_by_c1()) {
tty->cr();
@@ -2405,6 +2403,7 @@ void nmethod::verify() {


void nmethod::verify_interrupt_point(address call_site) {

// Verify IC only when nmethod installation is finished.
if (!is_not_installed()) {
if (CompiledICLocker::is_safe(this)) {
@@ -2415,6 +2414,8 @@ void nmethod::verify_interrupt_point(address call_site) {
}
}

+ HandleMark hm(Thread::current());

PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
assert(pd != NULL, "PcDesc must exist");
for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
@@ -2554,7 +2555,6 @@ void nmethod::print(outputStream* st) const {
}

void nmethod::print_code() {
- HandleMark hm;
ResourceMark m;
ttyLocker ttyl;
// Call the specialized decode method of this class.
@@ -2584,7 +2584,6 @@ void nmethod::print_dependencies() {

// Print the oops from the underlying CodeBlob.
void nmethod::print_oops(outputStream* st) {
- HandleMark hm;
ResourceMark m;
st->print("Oops:");
if (oops_begin() < oops_end()) {
@@ -2610,7 +2609,6 @@ void nmethod::print_oops(outputStream* st) {

// Print metadata pool.
void nmethod::print_metadata(outputStream* st) {
- HandleMark hm;
ResourceMark m;
st->print("Metadata:");
if (metadata_begin() < metadata_end()) {
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -253,7 +253,9 @@ void ScopeDesc::print_on(outputStream* st, PcDesc* pd) const {
#endif

void ScopeDesc::verify() {
- ResourceMark rm;
+ Thread* current_thread = Thread::current();
+ ResourceMark rm(current_thread);
+ HandleMark hm(current_thread);
guarantee(method()->is_method(), "type check");

// check if we have any illegal elements on the expression stack
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -320,7 +320,6 @@ void VtableStubs::vtable_stub_do(void f(VtableStub*)) {

extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index) {
ResourceMark rm;
- HandleMark hm;
Klass* klass = receiver->klass();
InstanceKlass* ik = InstanceKlass::cast(klass);
klassVtable vt = ik->vtable();
@@ -3416,7 +3416,6 @@ class G1STWRefProcTaskProxy: public AbstractGangTask {
virtual void work(uint worker_id) {
// The reference processing task executed by a single worker.
ResourceMark rm;
- HandleMark hm;

G1STWIsAliveClosure is_alive(_g1h);

@@ -3789,7 +3788,6 @@ class G1EvacuateRegionsBaseTask : public AbstractGangTask {

{
ResourceMark rm;
- HandleMark hm;

G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
pss->set_ref_discoverer(_g1h->ref_processor_stw());
@@ -1479,7 +1479,6 @@ class G1CMRefProcTaskProxy : public AbstractGangTask {

virtual void work(uint worker_id) {
ResourceMark rm;
- HandleMark hm;
G1CMTask* task = _cm->task(worker_id);
G1CMIsAliveClosure g1_is_alive(_g1h);
G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
@@ -1508,7 +1507,6 @@ void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers)

void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
ResourceMark rm;
- HandleMark hm;

// Is alive closure.
G1CMIsAliveClosure g1_is_alive(_g1h);
@@ -1755,7 +1753,6 @@ class G1CMRemarkTask : public AbstractGangTask {
task->record_start_time();
{
ResourceMark rm;
- HandleMark hm;

G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
Threads::threads_do(&threads_f);
@@ -1779,7 +1776,6 @@ class G1CMRemarkTask : public AbstractGangTask {

void G1ConcurrentMark::finalize_marking() {
ResourceMark rm;
- HandleMark hm;

_g1h->ensure_parsability(false);

@@ -153,7 +153,7 @@ void G1ConcurrentMarkThread::run_service() {
GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
{
ResourceMark rm;
- HandleMark hm;

double cycle_start = os::elapsedVTime();

{
@@ -277,7 +277,6 @@ void G1FullCollector::verify_after_marking() {
return;
}

- HandleMark hm; // handle scope
#if COMPILER2_OR_JVMCI
DerivedPointerTableDeactivate dpt_deact;
#endif
@@ -449,7 +449,6 @@ class G1ParVerifyTask: public AbstractGangTask {
}

void work(uint worker_id) {
- HandleMark hm;
VerifyRegionClosure blk(true, _vo);
_g1h->heap_region_par_iterate_from_worker_offset(&blk, &_hrclaimer, worker_id);
if (blk.failures()) {
@@ -619,7 +618,6 @@ double G1HeapVerifier::verify(G1VerifyType type, VerifyOption vo, const char* ms

if (should_verify(type) && _g1h->total_collections() >= VerifyGCStartAt) {
double verify_start = os::elapsedTime();
- HandleMark hm; // Discard invalid handles created during verification
prepare_for_verify();
Universe::verify(vo, msg);
verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
@@ -1007,7 +1007,6 @@ void PSParallelCompact::pre_compact()
heap->ensure_parsability(true); // retire TLABs

if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
- HandleMark hm; // Discard invalid handles created during verification
Universe::verify("Before GC");
}

@@ -1788,7 +1787,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {

{
ResourceMark rm;
- HandleMark hm;

const uint active_workers =
WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().total_workers(),
@@ -1945,7 +1943,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
#endif // ASSERT

if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
- HandleMark hm; // Discard invalid handles created during verification
Universe::verify("After GC");
}

@@ -430,13 +430,11 @@ bool PSScavenge::invoke_no_policy() {
heap->ensure_parsability(true); // retire TLABs

if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
- HandleMark hm; // Discard invalid handles created during verification
Universe::verify("Before GC");
}

{
ResourceMark rm;
- HandleMark hm;

GCTraceCPUTime tcpu;
GCTraceTime(Info, gc) tm("Pause Young", NULL, gc_cause, true);
@@ -714,7 +712,6 @@ bool PSScavenge::invoke_no_policy() {
}

if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
- HandleMark hm; // Discard invalid handles created during verification
Universe::verify("After GC");
}

@@ -229,20 +229,21 @@ CollectedHeap::CollectedHeap() :
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
- assert(Thread::current()->is_VM_thread(), "Precondition#1");
+ Thread* thread = Thread::current();
+ assert(thread->is_VM_thread(), "Precondition#1");
assert(Heap_lock->is_locked(), "Precondition#2");
GCCauseSetter gcs(this, cause);
switch (cause) {
case GCCause::_heap_inspection:
case GCCause::_heap_dump:
case GCCause::_metadata_GC_threshold : {
- HandleMark hm;
+ HandleMark hm(thread);
do_full_collection(false); // don't clear all soft refs
break;
}
case GCCause::_archive_time_gc:
case GCCause::_metadata_GC_clear_soft_refs: {
- HandleMark hm;
+ HandleMark hm(thread);
do_full_collection(true); // do clear all soft refs
break;
}
@@ -412,14 +413,14 @@ CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
DEBUG_ONLY(fill_args_check(start, words);)
- HandleMark hm; // Free handles before leaving.
+ HandleMark hm(Thread::current()); // Free handles before leaving.
fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
DEBUG_ONLY(fill_args_check(start, words);)
- HandleMark hm; // Free handles before leaving.
+ HandleMark hm(Thread::current()); // Free handles before leaving.

// Multiple objects may be required depending on the filler array maximum size. Fill
// the range up to that with objects that are filler_array_max_size sized. The
@@ -130,7 +130,6 @@ bool VM_GC_HeapInspection::collect() {
}

void VM_GC_HeapInspection::doit() {
- HandleMark hm;
Universe::heap()->ensure_parsability(false); // must happen, even if collection does
// not happen (e.g. due to GCLocker)
// or _full_gc being false
@@ -317,7 +317,6 @@ HeapWord* GenCollectedHeap::mem_allocate_work(size_t size,

// Loop until the allocation is satisfied, or unsatisfied after GC.
for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
- HandleMark hm; // Discard any handles allocated in each iteration.

// First allocation attempt is lock-free.
Generation *young = _young_gen;
@@ -477,7 +476,6 @@ void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t siz
log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);

if (run_verification && VerifyBeforeGC) {
- HandleMark hm; // Discard invalid handles created during verification
Universe::verify("Before GC");
}
COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());
@@ -502,7 +500,6 @@ void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t siz
// weak refs more uniform (and indeed remove such concerns
// from GCH). XXX

- HandleMark hm; // Discard invalid handles created during gc
save_marks(); // save marks for all gens
// We want to discover references, but not process them yet.
// This mode is disabled in process_discovered_references if the
@@ -535,7 +532,6 @@ void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t siz
update_gc_stats(gen, full);

if (run_verification && VerifyAfterGC) {
- HandleMark hm; // Discard invalid handles created during verification
Universe::verify("After GC");
}
}
@@ -654,8 +654,9 @@ class ShenandoahRefProcTaskProxy : public AbstractGangTask {
}

void work(uint worker_id) {
- ResourceMark rm;
- HandleMark hm;
+ Thread* current_thread = Thread::current();
+ ResourceMark rm(current_thread);
+ HandleMark hm(current_thread);
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahParallelWorkerSession worker_session(worker_id);
@@ -1103,8 +1103,6 @@ JRT_LEAF(void, InterpreterRuntime::verify_mdp(Method* method, address bcp, addre
address mdp2 = mdo->bci_to_dp(bci);
if (mdp != mdp2) {
ResourceMark rm;
- ResetNoHandleMark rnm; // In a LEAF entry.
- HandleMark hm;
tty->print_cr("FAILED verify : actual mdp %p expected mdp %p @ bci %d", mdp, mdp2, bci);
int current_di = mdo->dp_to_di(mdp);
int expected_di = mdo->dp_to_di(mdp2);
@@ -1125,7 +1123,6 @@ JRT_END
JRT_ENTRY(void, InterpreterRuntime::update_mdp_for_ret(JavaThread* thread, int return_bci))
assert(ProfileInterpreter, "must be profiling interpreter");
ResourceMark rm(thread);
- HandleMark hm(thread);
LastFrameAccessor last_frame(thread);
assert(last_frame.is_interpreted_frame(), "must come from interpreter");
MethodData* h_mdo = last_frame.method()->method_data();
@@ -1479,8 +1476,6 @@ JRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* threa
if (src_address == dest_address) {
return;
}
- ResetNoHandleMark rnm; // In a LEAF entry.
- HandleMark hm;
ResourceMark rm;
LastFrameAccessor last_frame(thread);
assert(last_frame.is_interpreted_frame(), "");
@@ -321,7 +321,6 @@ void OopMapCacheEntry::fill_for_native(const methodHandle& mh) {


void OopMapCacheEntry::fill(const methodHandle& method, int bci) {
- HandleMark hm;
// Flush entry to deallocate an existing entry
flush();
set_method(method());
