
8259063: Possible deadlock with vtable/itable creation vs concurrent class unloading

Reviewed-by: pliden, neliasso
fisk committed Jan 13, 2021
1 parent 6bb6093 commit 42d2d6d
Showing 4 changed files with 34 additions and 7 deletions.
27 changes: 25 additions & 2 deletions src/hotspot/share/code/codeBlob.cpp
@@ -304,12 +304,22 @@ AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
   return blob;
 }
 
+void* VtableBlob::operator new(size_t s, unsigned size) throw() {
+  // Handling of allocation failure stops compilation and prints a bunch of
+  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
+  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
+  // this context as we hold the CompiledICLocker. So we just don't handle code
+  // cache exhaustion here; we leave that for a later allocation that does not
+  // hold the CompiledICLocker.
+  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
+}
+
 VtableBlob::VtableBlob(const char* name, int size) :
   BufferBlob(name, size) {
 }
 
 VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
-  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");
 
   VtableBlob* blob = NULL;
   unsigned int size = sizeof(VtableBlob);
@@ -318,8 +328,21 @@ VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
   size += align_up(buffer_size, oopSize);
   assert(name != NULL, "must provide a name");
   {
-    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    if (!CodeCache_lock->try_lock()) {
+      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
+      // IC transition to megamorphic, for which this stub will be needed. It is better to
+      // bail out the transition, and wait for a more opportune moment. Not only is it not
+      // worth waiting for the lock blockingly for the megamorphic transition, it might
+      // also result in a deadlock to blockingly wait, when concurrent class unloading is
+      // performed. At this point in time, the CompiledICLocker is taken, so we are not
+      // allowed to blockingly wait for the CodeCache_lock, as these two locks are otherwise
+      // consistently taken in the opposite order. Bailing out results in an IC transition to
+      // the clean state instead, which will cause subsequent calls to retry the transitioning
+      // eventually.
+      return NULL;
+    }
     blob = new (size) VtableBlob(name, size);
+    CodeCache_lock->unlock();
   }
   // Track memory usage statistic after releasing CodeCache_lock
   MemoryService::track_code_cache_memory_usage();
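
The bail-out that the comment in VtableBlob::create describes can be illustrated outside HotSpot. Below is a minimal, self-contained C++ sketch of the same pattern (all names are illustrative stand-ins, not HotSpot's Mutex API): one mutex plays the role of CodeCache_lock, the other of the CompiledICLocker, and the allocation path try-locks instead of blocking while the IC lock is held, so the two locks are never waited on in an inconsistent order.

#include <mutex>
#include <optional>

std::mutex cc_lock;  // stands in for CodeCache_lock
std::mutex ic_lock;  // stands in for the CompiledICLocker

// Called while ic_lock is already held. Other code paths take cc_lock before
// ic_lock, so blocking on cc_lock here could deadlock; instead we try-lock and
// bail out, letting the caller fall back to the clean IC state and retry later.
std::optional<int> allocate_vtable_stub_locked() {
  if (!cc_lock.try_lock()) {
    return std::nullopt;  // bad time to allocate; a later call retries the transition
  }
  int stub = 42;          // stand-in for carving the stub out of the code cache
  cc_lock.unlock();
  return stub;
}

int main() {
  std::scoped_lock guard(ic_lock);  // the IC lock is already held at this point
  return allocate_vtable_stub_locked().has_value() ? 0 : 1;
}
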
2 changes: 2 additions & 0 deletions src/hotspot/share/code/codeBlob.hpp
@@ -441,6 +441,8 @@ class VtableBlob: public BufferBlob {
  private:
   VtableBlob(const char*, int);
 
+  void* operator new(size_t s, unsigned size) throw();
+
  public:
   // Creation
   static VtableBlob* create(const char* name, int buffer_size);
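
The declaration added above is a class-specific operator new that takes an extra size argument, so that `new (size) VtableBlob(name, size)` draws its memory from the code cache rather than the C++ heap. A rough standalone sketch of that idiom follows, with a hypothetical ArenaBlob class and plain malloc standing in for CodeCache::allocate; it assumes nothing about the real CodeBlob hierarchy.

#include <cstdlib>
#include <new>

class ArenaBlob {
 public:
  // Matches a call of the form `new (total_size) ArenaBlob(...)`: the compiler
  // supplies sizeof(ArenaBlob) as the first argument, the extra unsigned is ours.
  // Because this is a non-throwing allocation function, a null return makes the
  // new-expression yield null without running the constructor, so callers can
  // simply check the result.
  void* operator new(std::size_t /*header_size*/, unsigned total_size) noexcept {
    return std::malloc(total_size);  // stand-in for a custom allocator
  }
  void operator delete(void* p) noexcept { std::free(p); }

  explicit ArenaBlob(unsigned size) : _size(size) {}
  unsigned size() const { return _size; }

 private:
  unsigned _size;  // header plus trailing payload, as in BufferBlob
};

int main() {
  unsigned total = sizeof(ArenaBlob) + 128;        // room for a trailing buffer
  ArenaBlob* blob = new (total) ArenaBlob(total);  // may be null on exhaustion
  int ok = (blob != nullptr && blob->size() == total) ? 0 : 1;
  delete blob;
  return ok;
}
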
10 changes: 6 additions & 4 deletions src/hotspot/share/code/codeCache.cpp
@@ -483,7 +483,7 @@ CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  * run the constructor for the CodeBlob subclass he is busy
  * instantiating.
  */
-CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
+CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool handle_alloc_failure, int orig_code_blob_type) {
   // Possibly wakes up the sweeper thread.
   NMethodSweeper::report_allocation(code_blob_type);
   assert_locked_or_safepoint(CodeCache_lock);
@@ -531,11 +531,13 @@ CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
             tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                           heap->name(), get_code_heap(type)->name());
           }
-          return allocate(size, type, orig_code_blob_type);
+          return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
         }
       }
-      MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-      CompileBroker::handle_full_code_cache(orig_code_blob_type);
+      if (handle_alloc_failure) {
+        MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+        CompileBroker::handle_full_code_cache(orig_code_blob_type);
+      }
       return NULL;
     }
     if (PrintCodeCacheExtension) {
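
The new handle_alloc_failure flag lets a caller that must not run the heavyweight exhaustion path (which temporarily releases CodeCache_lock) opt out and simply get NULL back, deferring the reporting to a later allocation made from a safer context. A minimal sketch of that shape, with illustrative names rather than HotSpot's API:

#include <cstdlib>
#include <mutex>

std::mutex cache_lock;  // stands in for CodeCache_lock; assumed held by callers

void report_cache_full() {
  // Stand-in for CompileBroker::handle_full_code_cache(): expensive, and only
  // legal to run with cache_lock released.
}

void* cache_allocate(std::size_t size, bool handle_alloc_failure = true) {
  void* p = std::malloc(size);  // stand-in for carving a block out of the cache
  if (p == nullptr && handle_alloc_failure) {
    cache_lock.unlock();        // the failure path needs the lock dropped...
    report_cache_full();
    cache_lock.lock();          // ...and re-taken before returning
  }
  return p;                     // may be nullptr either way; callers must cope
}

int main() {
  std::lock_guard<std::mutex> guard(cache_lock);
  // A caller that also holds a lock with a conflicting order passes false and
  // handles nullptr itself, as VtableBlob::operator new does above.
  void* blob = cache_allocate(64, /* handle_alloc_failure */ false);
  std::free(blob);
  return blob != nullptr ? 0 : 1;
}
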
2 changes: 1 addition & 1 deletion src/hotspot/share/code/codeCache.hpp
@@ -136,7 +136,7 @@ class CodeCache : AllStatic {
   static const GrowableArray<CodeHeap*>* nmethod_heaps() { return _nmethod_heaps; }
 
   // Allocation/administration
-  static CodeBlob* allocate(int size, int code_blob_type, int orig_code_blob_type = CodeBlobType::All); // allocates a new CodeBlob
+  static CodeBlob* allocate(int size, int code_blob_type, bool handle_alloc_failure = true, int orig_code_blob_type = CodeBlobType::All); // allocates a new CodeBlob
   static void commit(CodeBlob* cb);    // called when the allocated CodeBlob has been filled
   static int alignment_unit();         // guaranteed alignment of all CodeBlobs
   static int alignment_offset();       // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
