8213587: Speed up CDS dump time by using resizable hashtables
Reviewed-by: jiangli, coleenp, gziemski
iklam committed Nov 21, 2018
1 parent 6d3df94 commit 6d26993
Showing 21 changed files with 264 additions and 105 deletions.
11 changes: 6 additions & 5 deletions src/hotspot/share/classfile/classListParser.cpp
@@ -389,8 +389,8 @@ Klass* ClassListParser::load_current_class(TRAPS) {
InstanceKlass* ik = InstanceKlass::cast(klass);
int id = this->id();
SystemDictionaryShared::update_shared_entry(ik, id);
- InstanceKlass* old = table()->lookup(id);
- if (old != NULL && old != ik) {
+ InstanceKlass** old_ptr = table()->lookup(id);
+ if (old_ptr != NULL) {
error("Duplicated ID %d for class %s", id, _class_name);
}
table()->add(id, ik);
@@ -404,11 +404,12 @@ bool ClassListParser::is_loading_from_source() {
}

InstanceKlass* ClassListParser::lookup_class_by_id(int id) {
- InstanceKlass* klass = table()->lookup(id);
- if (klass == NULL) {
+ InstanceKlass** klass_ptr = table()->lookup(id);
+ if (klass_ptr == NULL) {
error("Class ID %d has not been defined", id);
}
- return klass;
+ assert(*klass_ptr != NULL, "must be");
+ return *klass_ptr;
}


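Note: the behavioral point of both hunks above is that the new lookup() returns a pointer to the stored value (InstanceKlass**) rather than the value itself, so "key absent" (NULL pointer) is distinguishable from a stored NULL value. A minimal sketch of the new calling pattern, assuming the HotSpot context of this file (hypothetical helper, not code from the commit):

void record_id(ID2KlassTable* table, int id, InstanceKlass* ik) {
  InstanceKlass** old_ptr = table->lookup(id);  // NULL means "id not present"
  if (old_ptr != NULL) {
    return;  // duplicate id; *old_ptr is the previously registered klass
  }
  table->add(id, ik);
}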
28 changes: 5 additions & 23 deletions src/hotspot/share/classfile/classListParser.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,30 +27,12 @@

#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/hashtable.inline.hpp"

class CDSClassInfo;

// Look up from ID -> InstanceKlass*
- class ID2KlassTable : public Hashtable<InstanceKlass*, mtClass> {
+ class ID2KlassTable : public KVHashtable<int, InstanceKlass*, mtInternal> {
public:
- ID2KlassTable() : Hashtable<InstanceKlass*, mtClass>(1987, sizeof(HashtableEntry<InstanceKlass*, mtClass>)) { }
- void add(int id, InstanceKlass* klass) {
- unsigned int hash = (unsigned int)id;
- HashtableEntry<InstanceKlass*, mtClass>* entry = new_entry(hash, klass);
- add_entry(hash_to_index(hash), entry);
- }
-
- InstanceKlass* lookup(int id) {
- unsigned int hash = (unsigned int)id;
- int index = hash_to_index(id);
- for (HashtableEntry<InstanceKlass*, mtClass>* e = bucket(index); e != NULL; e = e->next()) {
- if (e->hash() == hash) {
- return e->literal();
- }
- }
- return NULL;
- }
+ ID2KlassTable() : KVHashtable<int, InstanceKlass*, mtInternal>(1987) {}
};

class ClassListParser : public StackObj {
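Note: the full KVHashtable definition lives in hashtable.hpp, which this page does not show. A sketch of its shape as implied by the call sites in this commit (inferred, not the actual declaration):

template<typename K, typename V, MEMFLAGS F> class KVHashtable : public BasicHashtable<F> {
public:
  KVHashtable(int table_size);   // initial bucket count, e.g. 1987 above
  void add(K key, V value);      // inserts unconditionally; callers check for duplicates
  V* lookup(K key);              // pointer to the stored value, or NULL if absent
  // inherited from BasicHashtable: table_size(), number_of_entries(), maybe_grow()
};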
2 changes: 0 additions & 2 deletions src/hotspot/share/classfile/classLoaderExt.cpp
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "classfile/classFileParser.hpp"
#include "classfile/classFileStream.hpp"
#include "classfile/classListParser.hpp"
#include "classfile/classLoader.inline.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/classLoaderData.inline.hpp"
@@ -257,7 +256,6 @@ void ClassLoaderExt::finalize_shared_paths_misc_info() {
// the "source:" in the class list file (see classListParser.cpp), and can be a directory or
// a JAR file.
InstanceKlass* ClassLoaderExt::load_class(Symbol* name, const char* path, TRAPS) {
-
assert(name != NULL, "invariant");
assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
ResourceMark rm(THREAD);
2 changes: 1 addition & 1 deletion src/hotspot/share/classfile/compactHashtable.hpp
@@ -27,7 +27,7 @@

#include "oops/array.hpp"
#include "oops/symbol.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/growableArray.hpp"


template <
1 change: 0 additions & 1 deletion src/hotspot/share/classfile/dictionary.cpp
@@ -67,7 +67,6 @@ Dictionary::~Dictionary() {
}
assert(number_of_entries() == 0, "should have removed all entries");
assert(new_entry_free_list() == NULL, "entry present on Dictionary's free list");
- free_buckets();
}

DictionaryEntry* Dictionary::new_entry(unsigned int hash, InstanceKlass* klass) {
1 change: 0 additions & 1 deletion src/hotspot/share/classfile/moduleEntry.cpp
@@ -352,7 +352,6 @@ ModuleEntryTable::~ModuleEntryTable() {
}
assert(number_of_entries() == 0, "should have removed all entries");
assert(new_entry_free_list() == NULL, "entry present on ModuleEntryTable's free list");
- free_buckets();
}

ModuleEntry* ModuleEntryTable::new_entry(unsigned int hash, Handle module_handle,
1 change: 0 additions & 1 deletion src/hotspot/share/classfile/packageEntry.cpp
@@ -191,7 +191,6 @@ PackageEntryTable::~PackageEntryTable() {
}
assert(number_of_entries() == 0, "should have removed all entries");
assert(new_entry_free_list() == NULL, "entry present on PackageEntryTable's free list");
- free_buckets();
}

PackageEntry* PackageEntryTable::new_entry(unsigned int hash, Symbol* name, ModuleEntry* module) {
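Note: this hunk and the two identical ones above (Dictionary, ModuleEntryTable) can drop free_buckets() because the commit moves bucket cleanup into the BasicHashtable base class. The base destructor itself is not visible on this page; a sketch of what it presumably does, given the _entry_blocks bookkeeping added in hashtable.cpp below (assumption, not quoted code):

template <MEMFLAGS F> BasicHashtable<F>::~BasicHashtable() {
  for (int i = 0; i < _entry_blocks->length(); i++) {
    FREE_C_HEAP_ARRAY(char, _entry_blocks->at(i));  // blocks recorded by new_entry()
  }
  delete _entry_blocks;
  free_buckets();  // previously each subclass destructor called this itself
}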
6 changes: 4 additions & 2 deletions src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -76,7 +76,9 @@ G1CodeRootSetTable::~G1CodeRootSetTable() {
}
}
assert(number_of_entries() == 0, "should have removed all entries");
- free_buckets();
+ // Each of the entries in new_entry_free_list() has been allocated in
+ // G1CodeRootSetTable::new_entry(). We never call the block allocator
+ // in BasicHashtable::new_entry().
for (BasicHashtableEntry<mtGC>* e = new_entry_free_list(); e != NULL; e = new_entry_free_list()) {
FREE_C_HEAP_ARRAY(char, e);
}
10 changes: 6 additions & 4 deletions src/hotspot/share/memory/metaspaceClosure.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,13 +55,15 @@ void MetaspaceClosure::push_impl(MetaspaceClosure::Ref* ref, Writability w) {
}

bool UniqueMetaspaceClosure::do_ref(MetaspaceClosure::Ref* ref, bool read_only) {
- bool* found = _has_been_visited.get(ref->obj());
+ bool* found = _has_been_visited.lookup(ref->obj());
if (found != NULL) {
assert(*found == read_only, "must be");
return false; // Already visited: no need to iterate embedded pointers.
} else {
- bool isnew = _has_been_visited.put(ref->obj(), read_only);
- assert(isnew, "sanity");
+ _has_been_visited.add(ref->obj(), read_only);
+ if (_has_been_visited.maybe_grow(MAX_TABLE_SIZE)) {
+ log_info(cds, hashtables)("Expanded _has_been_visited table to %d", _has_been_visited.table_size());
+ }
do_unique_ref(ref, read_only);
return true; // Saw this for the first time: iterate the embedded pointers.
}
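Note: the probe/insert/grow sequence in do_ref() is the core idiom of this change: look the key up, add it on a miss, then give the table a chance to double its bucket count. A condensed stand-alone version of the idiom, assuming the HotSpot context (hypothetical function, not code from the commit):

static bool mark_first_visit(KVHashtable<address, bool, mtInternal>* visited, address obj) {
  if (visited->lookup(obj) != NULL) {
    return false;                 // already seen
  }
  visited->add(obj, true);        // first visit
  visited->maybe_grow(1000000);   // resize if average bucket length exceeds the load factor
  return true;
}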
22 changes: 8 additions & 14 deletions src/hotspot/share/memory/metaspaceClosure.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
#include "memory/allocation.hpp"
#include "oops/array.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/resourceHash.hpp"
#include "utilities/hashtable.inline.hpp"

// The metadata hierarchy is separate from the oop hierarchy
class MetaspaceObj; // no C++ vtable
@@ -258,25 +258,19 @@ class MetaspaceClosure {

// This is a special MetaspaceClosure that visits each unique MetaspaceObj once.
class UniqueMetaspaceClosure : public MetaspaceClosure {
+ static const int INITIAL_TABLE_SIZE = 15889;
+ static const int MAX_TABLE_SIZE = 1000000;

// Do not override. Returns true if we are discovering ref->obj() for the first time.
virtual bool do_ref(Ref* ref, bool read_only);

public:
// Gets called the first time we discover an object.
virtual void do_unique_ref(Ref* ref, bool read_only) = 0;
+ UniqueMetaspaceClosure() : _has_been_visited(INITIAL_TABLE_SIZE) {}

private:
- static unsigned my_hash(const address& a) {
- return primitive_hash<address>(a);
- }
- static bool my_equals(const address& a0, const address& a1) {
- return primitive_equals<address>(a0, a1);
- }
- ResourceHashtable<
- address, bool,
- UniqueMetaspaceClosure::my_hash, // solaris compiler doesn't like: primitive_hash<address>
- UniqueMetaspaceClosure::my_equals, // solaris compiler doesn't like: primitive_equals<address>
- 15889, // prime number
- ResourceObj::C_HEAP> _has_been_visited;
+ KVHashtable<address, bool, mtInternal> _has_been_visited;
};

#endif // SHARE_VM_MEMORY_METASPACE_ITERATOR_HPP
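Note: the table size moves from a template argument (15889 baked into the type) to a constructor argument, which is what makes resizing possible at all; a template-parameter bucket count is frozen at compile time. Schematically (toy types, not HotSpot code):

template<typename V, int SIZE> struct FixedTable {
  V* _buckets[SIZE];              // capacity is part of the type; can never change
};
template<typename V> struct GrowableTable {
  int _size;
  V** _buckets;
  GrowableTable(int size) : _size(size), _buckets(new V*[size]()) {}  // runtime capacity
};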
30 changes: 13 additions & 17 deletions src/hotspot/share/memory/metaspaceShared.cpp
@@ -64,6 +64,7 @@
#include "utilities/align.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif
@@ -1067,26 +1068,19 @@ class SortedSymbolClosure: public SymbolClosure {
// metaspace data into their final location in the shared regions.

class ArchiveCompactor : AllStatic {
+ static const int INITIAL_TABLE_SIZE = 8087;
+ static const int MAX_TABLE_SIZE = 1000000;

static DumpAllocStats* _alloc_stats;
static SortedSymbolClosure* _ssc;

- static unsigned my_hash(const address& a) {
- return primitive_hash<address>(a);
- }
- static bool my_equals(const address& a0, const address& a1) {
- return primitive_equals<address>(a0, a1);
- }
- typedef ResourceHashtable<
- address, address,
- ArchiveCompactor::my_hash, // solaris compiler doesn't like: primitive_hash<address>
- ArchiveCompactor::my_equals, // solaris compiler doesn't like: primitive_equals<address>
- 16384, ResourceObj::C_HEAP> RelocationTable;
+ typedef KVHashtable<address, address, mtInternal> RelocationTable;
static RelocationTable* _new_loc_table;

public:
static void initialize() {
_alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
- _new_loc_table = new(ResourceObj::C_HEAP, mtInternal)RelocationTable;
+ _new_loc_table = new RelocationTable(INITIAL_TABLE_SIZE);
}
static DumpAllocStats* alloc_stats() {
return _alloc_stats;
@@ -1136,15 +1130,17 @@ class ArchiveCompactor : AllStatic {
newtop = _rw_region.top();
}
memcpy(p, obj, bytes);
- bool isnew = _new_loc_table->put(obj, (address)p);
+ assert(_new_loc_table->lookup(obj) == NULL, "each object can be relocated at most once");
+ _new_loc_table->add(obj, (address)p);
log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
assert(isnew, "must be");

+ if (_new_loc_table->maybe_grow(MAX_TABLE_SIZE)) {
+ log_info(cds, hashtables)("Expanded _new_loc_table to %d", _new_loc_table->table_size());
+ }
_alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
}

static address get_new_loc(MetaspaceClosure::Ref* ref) {
- address* pp = _new_loc_table->get(ref->obj());
+ address* pp = _new_loc_table->lookup(ref->obj());
assert(pp != NULL, "must be");
return *pp;
}
@@ -1288,7 +1284,7 @@

static Klass* get_relocated_klass(Klass* orig_klass) {
assert(DumpSharedSpaces, "dump time only");
- address* pp = _new_loc_table->get((address)orig_klass);
+ address* pp = _new_loc_table->lookup((address)orig_klass);
assert(pp != NULL, "must be");
Klass* klass = (Klass*)(*pp);
assert(klass->is_klass(), "must be");
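Note: ArchiveCompactor uses one table for both directions of the copy: add() records the old-address -> new-address mapping when an object is copied, and lookup() recovers the relocation later. A condensed sketch of the flow above (hypothetical helper names, HotSpot context assumed):

static void record_copy(RelocationTable* t, address obj, address copy) {
  assert(t->lookup(obj) == NULL, "each object can be relocated at most once");
  t->add(obj, copy);
  t->maybe_grow(1000000);   // MAX_TABLE_SIZE in the code above
}

static address relocated(RelocationTable* t, address obj) {
  address* pp = t->lookup(obj);
  assert(pp != NULL, "must have been copied");
  return *pp;
}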
13 changes: 13 additions & 0 deletions src/hotspot/share/utilities/globalDefinitions.hpp
@@ -1261,4 +1261,17 @@ static inline void* dereference_vptr(const void* addr) {
typedef const char* ccstr;
typedef const char* ccstrlist; // represents string arguments which accumulate

+ //----------------------------------------------------------------------------------------------------
+ // Default hash/equals functions used by ResourceHashtable and KVHashtable
+
+ template<typename K> unsigned primitive_hash(const K& k) {
+ unsigned hash = (unsigned)((uintptr_t)k);
+ return hash ^ (hash >> 3); // just in case we're dealing with aligned ptrs
+ }
+
+ template<typename K> bool primitive_equals(const K& k0, const K& k1) {
+ return k0 == k1;
+ }
+
+
#endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_HPP
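Note: for 8-byte-aligned pointers the low three bits are always zero, so with a power-of-two table size a plain cast would cluster every entry into one eighth of the buckets; the xor folds higher bits into those positions. A standalone illustration of what primitive_hash computes (example address is hypothetical):

#include <cstdint>

uintptr_t p = 0x7f0000401000;           // a typical 8-byte-aligned address
unsigned hash = (unsigned)p;            // low bits are 000 for any such pointer
unsigned mixed = hash ^ (hash >> 3);    // what primitive_hash<K> returns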
19 changes: 18 additions & 1 deletion src/hotspot/share/utilities/hashtable.cpp
@@ -65,6 +65,7 @@ template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsig
len = 1 << log2_intptr(len); // round down to power of 2
assert(len >= _entry_size, "");
_first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC);
+ _entry_blocks->append(_first_free_entry);
_end_block = _first_free_entry + len;
}
entry = (BasicHashtableEntry<F>*)_first_free_entry;
@@ -86,7 +87,9 @@ template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(
}

// Version of hashtable entry allocation that allocates in the C heap directly.
- // The allocator in blocks is preferable but doesn't have free semantics.
+ // The block allocator in BasicHashtable has less fragmentation, but the memory is not freed until
+ // the whole table is freed. Use allocate_new_entry() if you want to individually free the memory
+ // used by each entry.
template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::allocate_new_entry(unsigned int hashValue, T obj) {
HashtableEntry<T, F>* entry = (HashtableEntry<T, F>*) NEW_C_HEAP_ARRAY(char, this->entry_size(), F);

@@ -203,6 +206,20 @@ template <MEMFLAGS F> bool BasicHashtable<F>::resize(int new_size) {
return true;
}

+ template <MEMFLAGS F> bool BasicHashtable<F>::maybe_grow(int max_size, int load_factor) {
+ assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+
+ if (table_size() >= max_size) {
+ return false;
+ }
+ if (number_of_entries() / table_size() > load_factor) {
+ resize(MIN2<int>(table_size() * 2, max_size));
+ return true;
+ } else {
+ return false;
+ }
+ }

// Dump footprint and bucket length statistics
//
// Note: if you create a new subclass of Hashtable<MyNewType, F>, you will need to
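Note: the growth trigger compares integer average bucket occupancy against a load factor. The call sites above pass only max_size, so load_factor has a default declared in hashtable.hpp, which this page does not show; the simulation below assumes 8. With 1987 buckets (the ID2KlassTable size above) the first doubling then happens at 9 * 1987 = 17883 entries, and growth is geometric: reaching a million entries costs only six doublings.

#include <algorithm>
#include <cstdio>

// Stand-alone simulation of the doubling schedule in maybe_grow() above.
int main() {
  int table_size = 1987, load_factor = 8, max_size = 1000000;
  for (int entries = 1; entries <= 2000000; entries++) {
    if (table_size < max_size && entries / table_size > load_factor) {
      table_size = std::min(table_size * 2, max_size);  // MIN2 in HotSpot
      std::printf("at %d entries -> %d buckets\n", entries, table_size);
    }
  }
  return 0;
}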
