Skip to content
Permalink
Browse files

8248391: Unify handling of all OopStorage instances in weak root processing

OopStorage instance now owned by subsystems, and generalize dead entry notification

Co-authored-by: Erik Osterlund <erik.osterlund@oracle.com>
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Reviewed-by: coleenp, tschatzl
  • Loading branch information
3 people committed Jul 16, 2020
1 parent 55af701 commit a8d6a05ce8000a0c479e9ddb66f0858e4b12aae6
Showing with 815 additions and 567 deletions.
  1. +2 −2 src/hotspot/share/classfile/classLoaderData.cpp
  2. +2 −2 src/hotspot/share/classfile/dictionary.cpp
  3. +2 −2 src/hotspot/share/classfile/protectionDomainCache.cpp
  4. +19 −20 src/hotspot/share/classfile/stringTable.cpp
  5. +8 −18 src/hotspot/share/classfile/stringTable.hpp
  6. +3 −3 src/hotspot/share/classfile/systemDictionary.cpp
  7. +3 −3 src/hotspot/share/classfile/systemDictionaryShared.cpp
  8. +11 −1 src/hotspot/share/gc/g1/g1Policy.cpp
  9. +4 −2 src/hotspot/share/gc/g1/g1Policy.hpp
  10. +30 −1 src/hotspot/share/gc/shared/oopStorage.cpp
  11. +20 −1 src/hotspot/share/gc/shared/oopStorage.hpp
  12. +13 −0 src/hotspot/share/gc/shared/oopStorageParState.hpp
  13. +29 −17 src/hotspot/share/gc/shared/oopStorageSet.cpp
  14. +20 −42 src/hotspot/share/gc/shared/oopStorageSet.hpp
  15. +19 −0 src/hotspot/share/gc/shared/oopStorageSetParState.hpp
  16. +59 −2 src/hotspot/share/gc/shared/oopStorageSetParState.inline.hpp
  17. +24 −46 src/hotspot/share/gc/shared/weakProcessor.cpp
  18. +6 −2 src/hotspot/share/gc/shared/weakProcessor.hpp
  19. +5 −8 src/hotspot/share/gc/shared/weakProcessor.inline.hpp
  20. +12 −0 src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp
  21. +29 −0 src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp
  22. +10 −43 src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
  23. +3 −7 src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp
  24. +16 −66 src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp
  25. +18 −62 src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.inline.hpp
  26. +3 −3 src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp
  27. +9 −68 src/hotspot/share/gc/z/zRootsIterator.cpp
  28. +9 −16 src/hotspot/share/gc/z/zRootsIterator.hpp
  29. +4 −0 src/hotspot/share/gc/z/zWeakRootsProcessor.cpp
  30. +1 −1 src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp
  31. +1 −2 src/hotspot/share/jvmci/jvmciRuntime.cpp
  32. +21 −0 src/hotspot/share/memory/universe.cpp
  33. +9 −1 src/hotspot/share/memory/universe.hpp
  34. +2 −2 src/hotspot/share/prims/jvmtiExport.cpp
  35. +3 −3 src/hotspot/share/prims/jvmtiImpl.cpp
  36. +19 −27 src/hotspot/share/prims/resolvedMethodTable.cpp
  37. +15 −23 src/hotspot/share/prims/resolvedMethodTable.hpp
  38. +2 −2 src/hotspot/share/runtime/init.cpp
  39. +9 −7 src/hotspot/share/runtime/jniHandles.cpp
  40. +5 −0 src/hotspot/share/runtime/jniHandles.hpp
  41. +52 −62 test/hotspot/gtest/gc/shared/test_oopStorageSet.cpp
  42. +284 −0 test/hotspot/jtreg/runtime/stringtable/StringTableCleaningTest.java
@@ -488,7 +488,7 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {
if (loader_or_mirror() != NULL) {
assert(_holder.is_null(), "never replace holders");
_holder = WeakHandle(OopStorageSet::vm_weak(), loader_or_mirror);
_holder = WeakHandle(Universe::vm_weak(), loader_or_mirror);
}
}

@@ -655,7 +655,7 @@ ClassLoaderData::~ClassLoaderData() {
ClassLoaderDataGraph::dec_instance_classes(cl.instance_class_released());

// Release the WeakHandle
_holder.release(OopStorageSet::vm_weak());
_holder.release(Universe::vm_weak());

// Release C heap allocated hashtable for all the packages.
if (_packages != NULL) {
@@ -407,14 +407,14 @@ oop SymbolPropertyEntry::method_type() const {
}

void SymbolPropertyEntry::set_method_type(oop p) {
_method_type = OopHandle(OopStorageSet::vm_global(), p);
_method_type = OopHandle(Universe::vm_global(), p);
}

void SymbolPropertyEntry::free_entry() {
// decrement Symbol refcount here because hashtable doesn't.
literal()->decrement_refcount();
// Free OopHandle
_method_type.release(OopStorageSet::vm_global());
_method_type.release(Universe::vm_global());
}

SymbolPropertyTable::SymbolPropertyTable(int table_size)
@@ -94,7 +94,7 @@ void ProtectionDomainCacheTable::unlink() {
LogStream ls(lt);
ls.print_cr("protection domain unlinked at %d", i);
}
entry->literal().release(OopStorageSet::vm_weak());
entry->literal().release(Universe::vm_weak());
*p = entry->next();
free_entry(entry);
}
@@ -181,7 +181,7 @@ ProtectionDomainCacheEntry* ProtectionDomainCacheTable::add_entry(int index, uns
protection_domain->print_value_on(&ls);
ls.cr();
}
WeakHandle w(OopStorageSet::vm_weak(), protection_domain);
WeakHandle w(Universe::vm_weak(), protection_domain);
ProtectionDomainCacheEntry* p = new_entry(hash, w);
Hashtable<WeakHandle, mtClass>::add_entry(index, p);
return p;
@@ -85,8 +85,7 @@ static StringTableHash* _local_table = NULL;

volatile bool StringTable::_has_work = false;
volatile bool StringTable::_needs_rehashing = false;

volatile size_t StringTable::_uncleaned_items_count = 0;
OopStorage* StringTable::_oop_storage;

static size_t _current_size = 0;
static volatile size_t _items_count = 0;
@@ -129,7 +128,7 @@ class StringTableConfig : public StackObj {
return AllocateHeap(size, mtSymbol);
}
static void free_node(void* memory, Value const& value) {
value.release(OopStorageSet::string_table_weak());
value.release(StringTable::_oop_storage);
FreeHeap(memory);
StringTable::item_removed();
}
@@ -211,30 +210,24 @@ void StringTable::create_table() {
log_trace(stringtable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
_current_size, start_size_log_2);
_local_table = new StringTableHash(start_size_log_2, END_SIZE, REHASH_LEN);
_oop_storage = OopStorageSet::create_weak("StringTable Weak");
_oop_storage->register_num_dead_callback(&gc_notification);
}

// Record that one entry was inserted into the table.
// Returns the updated live-item count.
size_t StringTable::item_added() {
  const size_t updated_count = Atomic::add(&_items_count, (size_t)1);
  return updated_count;
}

// Accumulate 'ndead' into the running count of entries known to be dead
// but not yet cleaned out of the table. Returns the new running total.
// NOTE(review): the counter update is atomic, but the log line reads
// _uncleaned_items_count separately from the add, so the logged
// "Uncleaned items" value may differ from 'total' under concurrency.
size_t StringTable::add_items_to_clean(size_t ndead) {
size_t total = Atomic::add(&_uncleaned_items_count, (size_t)ndead);
log_trace(stringtable)(
"Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
_uncleaned_items_count, ndead, total);
return total;
}

// Record that one entry was freed from the table.
void StringTable::item_removed() {
  // Adding (size_t)-1 wraps modulo 2^N, which is a well-defined
  // unsigned decrement of the live-item count.
  Atomic::add(&_items_count, (size_t)-1);
}

double StringTable::get_load_factor() {
return (double)_items_count/_current_size;
return double(_items_count)/double(_current_size);
}

double StringTable::get_dead_factor() {
return (double)_uncleaned_items_count/_current_size;
double StringTable::get_dead_factor(size_t num_dead) {
return double(num_dead)/double(_current_size);
}

size_t StringTable::table_size() {
@@ -243,7 +236,7 @@ size_t StringTable::table_size() {

void StringTable::trigger_concurrent_work() {
MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
_has_work = true;
Atomic::store(&_has_work, true);
Service_lock->notify_all();
}

@@ -368,7 +361,7 @@ oop StringTable::do_intern(Handle string_or_null_h, const jchar* name,
bool rehash_warning;
do {
// Callers have already looked up the String using the jchar* name, so just go to add.
WeakHandle wh(OopStorageSet::string_table_weak(), string_h);
WeakHandle wh(_oop_storage, string_h);
// The hash table takes ownership of the WeakHandle, even if it's not inserted.
if (_local_table->insert(THREAD, lookup, wh, &rehash_warning)) {
update_needs_rehash(rehash_warning);
@@ -449,13 +442,15 @@ void StringTable::clean_dead_entries(JavaThread* jt) {
log_debug(stringtable)("Cleaned %ld of %ld", stdc._count, stdc._item);
}

void StringTable::check_concurrent_work() {
if (_has_work) {
void StringTable::gc_notification(size_t num_dead) {
log_trace(stringtable)("Uncleaned items:" SIZE_FORMAT, num_dead);

if (has_work()) {
return;
}

double load_factor = StringTable::get_load_factor();
double dead_factor = StringTable::get_dead_factor();
double dead_factor = StringTable::get_dead_factor(num_dead);
// We should clean/resize if we have more dead than alive,
// more items than preferred load factor or
// more dead items than water mark.
@@ -468,8 +463,11 @@ void StringTable::check_concurrent_work() {
}
}

// Returns whether concurrent cleaning/resize work is pending.
// load_acquire pairs with the release_store that clears the flag in
// do_concurrent_work(), so a reader seeing 'true' also observes the
// state written before the flag was published.
bool StringTable::has_work() {
return Atomic::load_acquire(&_has_work);
}

void StringTable::do_concurrent_work(JavaThread* jt) {
_has_work = false;
double load_factor = get_load_factor();
log_debug(stringtable, perf)("Concurrent work, live factor: %g", load_factor);
// We prefer growing, since that also removes dead items
@@ -478,6 +476,7 @@ void StringTable::do_concurrent_work(JavaThread* jt) {
} else {
clean_dead_entries(jt);
}
Atomic::release_store(&_has_work, false);
}

// Rehash
@@ -46,23 +46,26 @@ class StringTable : public CHeapObj<mtSymbol>{
friend class StringTableCreateEntry;

static volatile bool _has_work;
static volatile size_t _uncleaned_items_count;

// Set if one bucket is out of balance due to hash algorithm deficiency
static volatile bool _needs_rehashing;

static OopStorage* _oop_storage;

static void grow(JavaThread* jt);
static void clean_dead_entries(JavaThread* jt);

static double get_load_factor();
static double get_dead_factor();
static double get_dead_factor(size_t num_dead);

// GC support

static void check_concurrent_work();
// Callback for GC to notify of changes that might require cleaning or resize.
static void gc_notification(size_t num_dead);
static void trigger_concurrent_work();

static size_t item_added();
static void item_removed();
static size_t add_items_to_clean(size_t ndead);

static oop intern(Handle string_or_null_h, const jchar* name, int len, TRAPS);
static oop do_intern(Handle string_or_null, const jchar* name, int len, uintx hash, TRAPS);
@@ -79,20 +82,7 @@ class StringTable : public CHeapObj<mtSymbol>{
static void create_table();

static void do_concurrent_work(JavaThread* jt);
static bool has_work() { return _has_work; }

// GC support

// Must be called before a parallel walk where strings might die.
static void reset_dead_counter() { _uncleaned_items_count = 0; }

// After the parallel walk this method must be called to trigger
// cleaning. Note it might trigger a resize instead.
static void finish_dead_counter() { check_concurrent_work(); }

// If GC uses ParState directly it should add the number of cleared
// strings to this method.
static void inc_dead_counter(size_t ndead) { add_items_to_clean(ndead); }
static bool has_work();

// Probing
static oop lookup(Symbol* symbol);
@@ -176,15 +176,15 @@ void SystemDictionary::compute_java_loaders(TRAPS) {
vmSymbols::void_classloader_signature(),
CHECK);

_java_system_loader = OopHandle(OopStorageSet::vm_global(), (oop)result.get_jobject());
_java_system_loader = OopHandle(Universe::vm_global(), (oop)result.get_jobject());

JavaCalls::call_static(&result,
class_loader_klass,
vmSymbols::getPlatformClassLoader_name(),
vmSymbols::void_classloader_signature(),
CHECK);

_java_platform_loader = OopHandle(OopStorageSet::vm_global(), (oop)result.get_jobject());
_java_platform_loader = OopHandle(Universe::vm_global(), (oop)result.get_jobject());
}

ClassLoaderData* SystemDictionary::register_loader(Handle class_loader, bool create_mirror_cld) {
@@ -2030,7 +2030,7 @@ void SystemDictionary::initialize(TRAPS) {

// Allocate private object used as system class loader lock
oop lock_obj = oopFactory::new_intArray(0, CHECK);
_system_loader_lock_obj = OopHandle(OopStorageSet::vm_global(), lock_obj);
_system_loader_lock_obj = OopHandle(Universe::vm_global(), lock_obj);

// Initialize basic classes
resolve_well_known_classes(CHECK);
@@ -1023,23 +1023,23 @@ void SystemDictionaryShared::allocate_shared_protection_domain_array(int size, T
if (_shared_protection_domains.resolve() == NULL) {
oop spd = oopFactory::new_objArray(
SystemDictionary::ProtectionDomain_klass(), size, CHECK);
_shared_protection_domains = OopHandle(OopStorageSet::vm_global(), spd);
_shared_protection_domains = OopHandle(Universe::vm_global(), spd);
}
}

void SystemDictionaryShared::allocate_shared_jar_url_array(int size, TRAPS) {
if (_shared_jar_urls.resolve() == NULL) {
oop sju = oopFactory::new_objArray(
SystemDictionary::URL_klass(), size, CHECK);
_shared_jar_urls = OopHandle(OopStorageSet::vm_global(), sju);
_shared_jar_urls = OopHandle(Universe::vm_global(), sju);
}
}

void SystemDictionaryShared::allocate_shared_jar_manifest_array(int size, TRAPS) {
if (_shared_jar_manifests.resolve() == NULL) {
oop sjm = oopFactory::new_objArray(
SystemDictionary::Jar_Manifest_klass(), size, CHECK);
_shared_jar_manifests = OopHandle(OopStorageSet::vm_global(), sjm);
_shared_jar_manifests = OopHandle(Universe::vm_global(), sjm);
}
}

@@ -77,7 +77,8 @@ G1Policy::G1Policy(STWGCTimer* gc_timer) :
_concurrent_start_to_mixed(),
_collection_set(NULL),
_g1h(NULL),
_phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
_phase_times_timer(gc_timer),
_phase_times(NULL),
_mark_remark_start_sec(0),
_mark_cleanup_start_sec(0),
_tenuring_threshold(MaxTenuringThreshold),
@@ -401,6 +402,15 @@ double G1Policy::predict_survivor_regions_evac_time() const {
return survivor_regions_evac_time;
}

// Accessor for the phase-times bookkeeping object, allocated on first use.
G1GCPhaseTimes* G1Policy::phase_times() const {
// Lazy allocation because it must follow initialization of all the
// OopStorage objects by various other subsystems.
// NOTE(review): the lazy initialization is not synchronized; assumes the
// first call happens before any concurrent use — confirm against callers.
if (_phase_times == NULL) {
_phase_times = new G1GCPhaseTimes(_phase_times_timer, ParallelGCThreads);
}
return _phase_times;
}

void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) {
guarantee(use_adaptive_young_list_length(), "should not call this otherwise" );

@@ -182,7 +182,9 @@ class G1Policy: public CHeapObj<mtGC> {
// Stash a pointer to the g1 heap.
G1CollectedHeap* _g1h;

G1GCPhaseTimes* _phase_times;
STWGCTimer* _phase_times_timer;
// Lazily initialized
mutable G1GCPhaseTimes* _phase_times;

// This set of variables tracks the collector efficiency, in order to
// determine whether we should initiate a new marking.
@@ -300,7 +302,7 @@ class G1Policy: public CHeapObj<mtGC> {

G1CollectorState* collector_state() const;

G1GCPhaseTimes* phase_times() const { return _phase_times; }
G1GCPhaseTimes* phase_times() const;

// Check the current value of the young list RSet length and
// compare it against the last prediction. If the current value is
@@ -746,6 +746,7 @@ OopStorage::OopStorage(const char* name) :
_deferred_updates(NULL),
_allocation_mutex(make_oopstorage_mutex(name, "alloc", Mutex::oopstorage)),
_active_mutex(make_oopstorage_mutex(name, "active", Mutex::oopstorage - 1)),
_num_dead_callback(NULL),
_allocation_count(0),
_concurrent_iteration_count(0),
_needs_cleanup(false)
@@ -814,6 +815,21 @@ static jlong cleanup_trigger_permit_time = 0;
// too frequent.
const jlong cleanup_trigger_defer_period = 500 * NANOSECS_PER_MILLISEC;

// Install the subsystem callback that is invoked with the number of dead
// entries found. Only a single callback per OopStorage is supported;
// double registration is caught by the assert in debug builds only.
void OopStorage::register_num_dead_callback(NumDeadCallback f) {
assert(_num_dead_callback == NULL, "Only one callback function supported");
_num_dead_callback = f;
}

// Notify the owning subsystem of the number of dead entries observed.
// A no-op when no callback has been registered.
void OopStorage::report_num_dead(size_t num_dead) const {
  if (_num_dead_callback == NULL) {
    return;
  }
  _num_dead_callback(num_dead);
}

// True if a subsystem registered interest in dead-entry counts; lets
// callers skip counting dead entries when nobody is listening.
bool OopStorage::should_report_num_dead() const {
return _num_dead_callback != NULL;
}

void OopStorage::trigger_cleanup_if_needed() {
MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
if (Atomic::load(&needs_cleanup_requested) &&
@@ -970,7 +986,8 @@ OopStorage::BasicParState::BasicParState(const OopStorage* storage,
_block_count(0), // initialized properly below
_next_block(0),
_estimated_thread_count(estimated_thread_count),
_concurrent(concurrent)
_concurrent(concurrent),
_num_dead(0)
{
assert(estimated_thread_count > 0, "estimated thread count must be positive");
update_concurrent_iteration_count(1);
@@ -1043,6 +1060,18 @@ bool OopStorage::BasicParState::finish_iteration(const IterationData* data) cons
return false;
}

// Current tally of dead entries found by this parallel iteration.
// Read atomically because worker threads update it concurrently via
// increment_num_dead().
size_t OopStorage::BasicParState::num_dead() const {
return Atomic::load(&_num_dead);
}

// Atomically add a worker's count of dead entries to the shared tally.
void OopStorage::BasicParState::increment_num_dead(size_t num_dead) {
Atomic::add(&_num_dead, num_dead);
}

// Forward the accumulated dead-entry count to the underlying storage,
// which relays it to the registered subsystem callback (if any).
void OopStorage::BasicParState::report_num_dead() const {
_storage->report_num_dead(Atomic::load(&_num_dead));
}

// Accessor for this storage instance's name (set at construction).
const char* OopStorage::name() const { return _name; }

#ifndef PRODUCT

0 comments on commit a8d6a05

Please sign in to comment.