8263976: Remove block allocation from BasicHashtable
Reviewed-by: lfoltan, iklam
coleenp committed Mar 23, 2021
1 parent fbd57bd commit 5bc382fb7a051b3b319c55ab2d025b49848040b1
Showing 16 changed files with 47 additions and 168 deletions.
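
In outline: BasicHashtable used to carve entries out of larger malloc'd blocks and park freed entries on an internal _free_list, so the memory behind any single entry could not be returned to the C heap until the whole table was destroyed. After this commit, each entry is allocated and freed individually. Below is a minimal standalone sketch of the two strategies under simplified, made-up types (Entry, BlockAllocatedTable, HeapAllocatedTable are illustrations, not the HotSpot sources):

    #include <cstdlib>
    #include <vector>

    struct Entry { unsigned hash; Entry* next; };

    // Old scheme: entries carved from larger blocks; freed entries are parked
    // on a free list, and block memory is only released with the whole table.
    struct BlockAllocatedTable {
      std::vector<char*> blocks;
      char*  first_free = nullptr;
      char*  end_block  = nullptr;
      Entry* free_list  = nullptr;

      Entry* new_entry(unsigned hash) {
        if (free_list != nullptr) {          // reuse a parked entry first
          Entry* e = free_list;
          free_list = e->next;
          e->hash = hash;
          e->next = nullptr;
          return e;
        }
        if (first_free == nullptr || first_free + sizeof(Entry) > end_block) {
          size_t len = 64 * sizeof(Entry);   // grab a fresh block
          first_free = static_cast<char*>(std::malloc(len));
          blocks.push_back(first_free);
          end_block = first_free + len;
        }
        Entry* e = reinterpret_cast<Entry*>(first_free);
        first_free += sizeof(Entry);
        e->hash = hash;
        e->next = nullptr;
        return e;
      }

      void free_entry(Entry* e) {            // cannot std::free(e): it is not a
        e->next = free_list;                 // malloc'd pointer, just a slice
        free_list = e;                       // of some block
      }

      ~BlockAllocatedTable() {
        for (char* b : blocks) std::free(b); // everything released at once
      }
    };

    // New scheme: one allocation per entry, so free_entry() returns the
    // memory to the C heap immediately.
    struct HeapAllocatedTable {
      Entry* new_entry(unsigned hash) {
        Entry* e = static_cast<Entry*>(std::malloc(sizeof(Entry)));
        e->hash = hash;
        e->next = nullptr;
        return e;
      }
      void free_entry(Entry* e) { std::free(e); }
    };

The trade-off: one malloc per entry instead of amortized block allocation, but free_entry() now genuinely releases memory, and the _free_list/_first_free_entry/_end_block bookkeeping disappears — which is what most of the deletions below are doing.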
@@ -74,11 +74,10 @@ Dictionary::~Dictionary() {
     }
   }
   assert(number_of_entries() == 0, "should have removed all entries");
-  assert(new_entry_free_list() == NULL, "entry present on Dictionary's free list");
 }
 
 DictionaryEntry* Dictionary::new_entry(unsigned int hash, InstanceKlass* klass) {
-  DictionaryEntry* entry = (DictionaryEntry*)Hashtable<InstanceKlass*, mtClass>::allocate_new_entry(hash, klass);
+  DictionaryEntry* entry = (DictionaryEntry*)Hashtable<InstanceKlass*, mtClass>::new_entry(hash, klass);
   entry->set_pd_set(NULL);
   assert(klass->is_instance_klass(), "Must be");
   return entry;
@@ -95,9 +94,7 @@ void Dictionary::free_entry(DictionaryEntry* entry) {
     entry->set_pd_set(to_delete->next());
     delete to_delete;
   }
-  // Unlink from the Hashtable prior to freeing
-  unlink_entry(entry);
-  FREE_C_HEAP_ARRAY(char, entry);
+  BasicHashtable<mtClass>::free_entry(entry);
 }
 
 const int _resize_load_trigger = 5;       // load factor that will trigger the resize
@@ -551,7 +548,7 @@ void SymbolPropertyTable::methods_do(void f(Method*)) {

 void SymbolPropertyTable::free_entry(SymbolPropertyEntry* entry) {
   entry->free_entry();
-  Hashtable<Symbol*, mtSymbol>::free_entry(entry);
+  BasicHashtable<mtSymbol>::free_entry(entry);
 }
 
 void DictionaryEntry::verify_protection_domain_set() {
@@ -92,14 +92,6 @@ class Dictionary : public Hashtable<InstanceKlass*, mtClass> {
     return (DictionaryEntry**)Hashtable<InstanceKlass*, mtClass>::bucket_addr(i);
   }
 
-  void add_entry(int index, DictionaryEntry* new_entry) {
-    Hashtable<InstanceKlass*, mtClass>::add_entry(index, (HashtableEntry<InstanceKlass*, mtClass>*)new_entry);
-  }
-
-  void unlink_entry(DictionaryEntry* entry) {
-    Hashtable<InstanceKlass*, mtClass>::unlink_entry((HashtableEntry<InstanceKlass*, mtClass>*)entry);
-  }
-
   void free_entry(DictionaryEntry* entry);
 
   bool is_valid_protection_domain(unsigned int hash,
@@ -58,7 +58,7 @@ LoaderConstraintEntry* LoaderConstraintTable::new_entry(
 void LoaderConstraintTable::free_entry(LoaderConstraintEntry *entry) {
   // decrement name refcount before freeing
   entry->name()->decrement_refcount();
-  Hashtable<InstanceKlass*, mtClass>::free_entry(entry);
+  BasicHashtable<mtClass>::free_entry(entry);
 }
 
 // The loaderConstraintTable must always be accessed with the
@@ -357,14 +357,10 @@ ModuleEntryTable::~ModuleEntryTable() {
       if (to_remove->location() != NULL) {
         to_remove->location()->decrement_refcount();
       }
-
-      // Unlink from the Hashtable prior to freeing
-      unlink_entry(to_remove);
-      FREE_C_HEAP_ARRAY(char, to_remove);
+      BasicHashtable<mtModule>::free_entry(to_remove);
     }
   }
   assert(number_of_entries() == 0, "should have removed all entries");
-  assert(new_entry_free_list() == NULL, "entry present on ModuleEntryTable's free list");
 }
 
 void ModuleEntry::set_loader_data(ClassLoaderData* cld) {
@@ -579,7 +575,7 @@ ModuleEntry* ModuleEntryTable::new_entry(unsigned int hash, Handle module_handle
                                          Symbol* version, Symbol* location,
                                          ClassLoaderData* loader_data) {
   assert(Module_lock->owned_by_self(), "should have the Module_lock");
-  ModuleEntry* entry = (ModuleEntry*)Hashtable<Symbol*, mtModule>::allocate_new_entry(hash, name);
+  ModuleEntry* entry = (ModuleEntry*)Hashtable<Symbol*, mtModule>::new_entry(hash, name);
 
   // Initialize fields specific to a ModuleEntry
   entry->init();
@@ -187,13 +187,10 @@ PackageEntryTable::~PackageEntryTable() {
         to_remove->delete_qualified_exports();
         to_remove->name()->decrement_refcount();
 
-        // Unlink from the Hashtable prior to freeing
-        unlink_entry(to_remove);
-        FREE_C_HEAP_ARRAY(char, to_remove);
+        BasicHashtable<mtModule>::free_entry(to_remove);
       }
     }
   assert(number_of_entries() == 0, "should have removed all entries");
-  assert(new_entry_free_list() == NULL, "entry present on PackageEntryTable's free list");
 }
 
 #if INCLUDE_CDS_JAVA_HEAP
@@ -322,7 +319,7 @@ void PackageEntryTable::load_archived_entries(Array<PackageEntry*>* archived_pac

 PackageEntry* PackageEntryTable::new_entry(unsigned int hash, Symbol* name, ModuleEntry* module) {
   assert(Module_lock->owned_by_self(), "should have the Module_lock");
-  PackageEntry* entry = (PackageEntry*)Hashtable<Symbol*, mtModule>::allocate_new_entry(hash, name);
+  PackageEntry* entry = (PackageEntry*)Hashtable<Symbol*, mtModule>::new_entry(hash, name);
 
   JFR_ONLY(INIT_ID(entry);)
 
@@ -193,7 +193,7 @@ void PlaceholderTable::free_entry(PlaceholderEntry* entry) {
   // decrement Symbol refcount here because Hashtable doesn't.
   entry->literal()->decrement_refcount();
   if (entry->supername() != NULL) entry->supername()->decrement_refcount();
-  Hashtable<Symbol*, mtClass>::free_entry(entry);
+  BasicHashtable<mtClass>::free_entry(entry);
 }
 
 
@@ -137,7 +137,7 @@ void ResolutionErrorTable::free_entry(ResolutionErrorEntry *entry) {
   if (entry->nest_host_error() != NULL) {
     FREE_C_HEAP_ARRAY(char, entry->nest_host_error());
   }
-  Hashtable<ConstantPool*, mtClass>::free_entry(entry);
+  BasicHashtable<mtClass>::free_entry(entry);
 }
 
 
@@ -1576,13 +1576,14 @@ InstanceKlass* SystemDictionary::find_or_define_helper(Symbol* class_name, Handl
     // Other cases fall through, and may run into duplicate defines
     // caught by finding an entry in the SystemDictionary
     if (is_parallelDefine(class_loader) && (probe->instance_klass() != NULL)) {
+      InstanceKlass* ik = probe->instance_klass();
       placeholders()->find_and_remove(name_hash, name_h, loader_data, PlaceholderTable::DEFINE_CLASS, THREAD);
       SystemDictionary_lock->notify_all();
 #ifdef ASSERT
       InstanceKlass* check = dictionary->find_class(name_hash, name_h);
       assert(check != NULL, "definer missed recording success");
 #endif
-      return probe->instance_klass();
+      return ik;
     } else {
       // This thread will define the class (even if earlier thread tried and had an error)
       probe->set_definer(THREAD);
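
A note on why this hunk belongs in an allocation change: find_and_remove() can delete the placeholder entry that probe points at. Under the old block allocator the freed entry's memory stayed parked on the table's free list, so reading probe->instance_klass() after the removal happened to keep working; with per-entry C-heap freeing it would be a read of freed memory, so the klass is cached in ik before the entry can go away. A minimal sketch of the hazard and the fix, using hypothetical stand-in types rather than the HotSpot ones:

    #include <cstdlib>

    struct Entry { int value; };

    // Hypothetical table that frees its only entry when asked to remove it.
    struct Table {
      Entry* entry;
      void find_and_remove() { std::free(entry); entry = nullptr; }
    };

    int broken(Table& t) {
      Entry* probe = t.entry;
      t.find_and_remove();   // frees the memory probe points at
      return probe->value;   // use-after-free once entries are freed individually
    }

    int fixed(Table& t) {
      Entry* probe = t.entry;
      int v = probe->value;  // read the field first...
      t.find_and_remove();   // ...then let the table free the entry
      return v;              // mirrors the diff: cache ik, then return ik
    }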
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,14 +45,7 @@ size_t G1CodeRootSetTable::mem_size() {

 G1CodeRootSetTable::Entry* G1CodeRootSetTable::new_entry(nmethod* nm) {
   unsigned int hash = compute_hash(nm);
-  Entry* entry = (Entry*) new_entry_free_list();
-  if (entry == NULL) {
-    entry = (Entry*) NEW_C_HEAP_ARRAY2(char, entry_size(), mtGC, CURRENT_PC);
-  }
-  entry->set_next(NULL);
-  entry->set_hash(hash);
-  entry->set_literal(nm);
-  return entry;
+  return (Entry*)Hashtable<nmethod*, mtGC>::new_entry(hash, nm);
 }
 
 void G1CodeRootSetTable::remove_entry(Entry* e, Entry* previous) {
@@ -73,17 +66,10 @@ G1CodeRootSetTable::~G1CodeRootSetTable() {
       Entry* to_remove = e;
       // read next before freeing.
       e = e->next();
-      unlink_entry(to_remove);
-      FREE_C_HEAP_ARRAY(char, to_remove);
+      BasicHashtable<mtGC>::free_entry(to_remove);
     }
   }
   assert(number_of_entries() == 0, "should have removed all entries");
-  // Each of the entries in new_entry_free_list() have been allocated in
-  // G1CodeRootSetTable::new_entry(). We never call the block allocator
-  // in BasicHashtable::new_entry().
-  for (BasicHashtableEntry<mtGC>* e = new_entry_free_list(); e != NULL; e = new_entry_free_list()) {
-    FREE_C_HEAP_ARRAY(char, e);
-  }
 }
 
 bool G1CodeRootSetTable::add(nmethod* nm) {
@@ -124,7 +110,6 @@ void G1CodeRootSetTable::copy_to(G1CodeRootSetTable* new_table) {
       new_table->add(e->literal());
     }
   }
-  new_table->copy_freelist(this);
 }
 
 void G1CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
@@ -94,6 +94,7 @@
#include "services/threadService.hpp"
#include "utilities/copy.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#include "utilities/utf8.hpp"
@@ -64,7 +64,6 @@ void JvmtiTagMapTable::clear() {
     *p = NULL; // clear out buckets.
   }
   assert(number_of_entries() == 0, "should have removed all entries");
-  assert(new_entry_free_list() == NULL, "entry present on JvmtiTagMapTable's free list");
 }
 
 JvmtiTagMapTable::~JvmtiTagMapTable() {
@@ -74,15 +73,14 @@ JvmtiTagMapTable::~JvmtiTagMapTable() {

 // Entries are C_Heap allocated
 JvmtiTagMapEntry* JvmtiTagMapTable::new_entry(unsigned int hash, WeakHandle w, jlong tag) {
-  JvmtiTagMapEntry* entry = (JvmtiTagMapEntry*)Hashtable<WeakHandle, mtServiceability>::allocate_new_entry(hash, w);
+  JvmtiTagMapEntry* entry = (JvmtiTagMapEntry*)Hashtable<WeakHandle, mtServiceability>::new_entry(hash, w);
   entry->set_tag(tag);
   return entry;
 }
 
 void JvmtiTagMapTable::free_entry(JvmtiTagMapEntry* entry) {
-  unlink_entry(entry);
   entry->literal().release(JvmtiExport::weak_tag_storage()); // release to OopStorage
-  FREE_C_HEAP_ARRAY(char, entry);
+  BasicHashtable<mtServiceability>::free_entry(entry);
 }
 
 unsigned int JvmtiTagMapTable::compute_hash(oop obj) {
@@ -491,10 +491,6 @@ typedef HashtableEntry<InstanceKlass*, mtClass> KlassHashtableEntry;
   \
   nonstatic_field(BasicHashtable<mtInternal>, _table_size, int) \
   nonstatic_field(BasicHashtable<mtInternal>, _buckets, HashtableBucket<mtInternal>*) \
-  volatile_nonstatic_field(BasicHashtable<mtInternal>, _free_list, BasicHashtableEntry<mtInternal>*) \
-  nonstatic_field(BasicHashtable<mtInternal>, _first_free_entry, char*) \
-  nonstatic_field(BasicHashtable<mtInternal>, _end_block, char*) \
-  nonstatic_field(BasicHashtable<mtInternal>, _entry_size, int) \
   \
   /*******************/ \
   /* ClassLoaderData */ \
@@ -23,20 +23,19 @@
  */
 
 #include "precompiled.hpp"
-#include "classfile/altHashing.hpp"
 #include "classfile/dictionary.hpp"
 #include "classfile/javaClasses.inline.hpp"
 #include "classfile/moduleEntry.hpp"
 #include "classfile/packageEntry.hpp"
 #include "classfile/placeholders.hpp"
 #include "classfile/protectionDomainCache.hpp"
-#include "classfile/stringTable.hpp"
 #include "classfile/vmClasses.hpp"
 #include "code/nmethod.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
 #include "oops/weakHandle.inline.hpp"
+#include "prims/jvmtiTagMapTable.hpp"
 #include "runtime/safepoint.hpp"
@@ -45,67 +44,31 @@
#include "utilities/hashtable.inline.hpp"
#include "utilities/numberSeq.hpp"


// This hashtable is implemented as an open hash table with a fixed number of buckets.

template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() {
BasicHashtableEntry<F>* entry = NULL;
if (_free_list != NULL) {
entry = _free_list;
_free_list = _free_list->next();
}
return entry;
}
// Hashtable entry allocates in the C heap directly.

// HashtableEntrys are allocated in blocks to reduce the space overhead.
template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
BasicHashtableEntry<F>* entry = new_entry_free_list();

if (entry == NULL) {
if (_first_free_entry + _entry_size >= _end_block) {
int block_size = MAX2((int)_table_size / 2, (int)_number_of_entries); // pick a reasonable value
block_size = clamp(block_size, 2, 512); // but never go out of this range
int len = round_down_power_of_2(_entry_size * block_size);
assert(len >= _entry_size, "");
_first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC);
_entry_blocks.append(_first_free_entry);
_end_block = _first_free_entry + len;
}
entry = (BasicHashtableEntry<F>*)_first_free_entry;
_first_free_entry += _entry_size;
}

assert(_entry_size % HeapWordSize == 0, "");
entry->set_hash(hashValue);
BasicHashtableEntry<F>* entry = ::new (NEW_C_HEAP_ARRAY(char, this->entry_size(), F))
BasicHashtableEntry<F>(hashValue);
return entry;
}


template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(unsigned int hashValue, T obj) {
HashtableEntry<T, F>* entry;

entry = (HashtableEntry<T, F>*)BasicHashtable<F>::new_entry(hashValue);
entry->set_literal(obj);
HashtableEntry<T, F>* entry = ::new (NEW_C_HEAP_ARRAY(char, this->entry_size(), F))
HashtableEntry<T, F>(hashValue, obj);
return entry;
}

// Version of hashtable entry allocation that allocates in the C heap directly.
// The block allocator in BasicHashtable has less fragmentation, but the memory is not freed until
// the whole table is freed. Use allocate_new_entry() if you want to individually free the memory
// used by each entry
template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::allocate_new_entry(unsigned int hashValue, T obj) {
HashtableEntry<T, F>* entry = (HashtableEntry<T, F>*) NEW_C_HEAP_ARRAY(char, this->entry_size(), F);

if (DumpSharedSpaces) {
// Avoid random bits in structure padding so we can have deterministic content in CDS archive
memset((void*)entry, 0, this->entry_size());
}
entry->set_hash(hashValue);
entry->set_literal(obj);
entry->set_next(NULL);
return entry;
template <MEMFLAGS F> inline void BasicHashtable<F>::free_entry(BasicHashtableEntry<F>* entry) {
// Unlink from the Hashtable prior to freeing
unlink_entry(entry);
FREE_C_HEAP_ARRAY(char, entry);
JFR_ONLY(_stats_rate.remove();)
}


template <MEMFLAGS F> void BasicHashtable<F>::free_buckets() {
FREE_C_HEAP_ARRAY(HashtableBucket, _buckets);
_buckets = NULL;

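On the new allocation idiom in hashtable.cpp above: NEW_C_HEAP_ARRAY(char, size, F) returns raw bytes, ::new (ptr) T(...) placement-constructs the entry in them, and free_entry() hands the bytes straight back via FREE_C_HEAP_ARRAY(char, entry) without running a destructor, which stays correct as long as the entry types remain trivially destructible. The allocation is sized with entry_size() rather than sizeof because subclass entries append payload past the base fields. The same pattern in standard C++, with malloc/free standing in for the HotSpot macros (BasicEntry is a made-up stand-in type):

    #include <cstdlib>
    #include <new>

    struct BasicEntry {
      unsigned    _hash;
      BasicEntry* _next;
      explicit BasicEntry(unsigned hash) : _hash(hash), _next(nullptr) {}
    };

    // Mirrors ::new (NEW_C_HEAP_ARRAY(char, this->entry_size(), F))
    //             BasicHashtableEntry<F>(hashValue);
    // entry_size may exceed sizeof(BasicEntry) when a subclass adds payload.
    BasicEntry* new_entry(unsigned hash, size_t entry_size) {
      void* raw = std::malloc(entry_size);
      return ::new (raw) BasicEntry(hash);
    }

    // Mirrors FREE_C_HEAP_ARRAY(char, entry): no destructor call is needed
    // because BasicEntry is trivially destructible.
    void free_entry(BasicEntry* entry) {
      std::free(entry);
    }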