8255661: TestHeapDumpOnOutOfMemoryError fails with EOFException
Reviewed-by: rrich, cjplummer
schmelter-sap committed May 17, 2021
1 parent a555fd8 commit a29612ea9998a8e45f25add7ae30bfbc62ce3756
Showing with 27 additions and 36 deletions.
  1. +1 −1 src/hotspot/share/services/heapDumper.cpp
  2. +23 −33 src/hotspot/share/services/heapDumperCompression.cpp
  3. +3 −2 src/hotspot/share/services/heapDumperCompression.hpp
src/hotspot/share/services/heapDumper.cpp
@@ -444,7 +444,7 @@ class DumpWriter : public StackObj {
   void finish_dump_segment();
 
   // Called by threads used for parallel writing.
-  void writer_loop() { _backend.thread_loop(false); }
+  void writer_loop() { _backend.thread_loop(); }
   // Called when finished to release the threads.
   void deactivate() { flush(); _backend.deactivate(); }
 };
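Note: with the single_run flag gone from the backend, the writer-side entry points of DumpWriter reduce to the two one-liners above. A consolidated reading of just those two members after the patch (a sketch pieced together from the context lines of this hunk, not a verbatim copy of the class):

    // Each parallel writer thread simply runs the backend's worker loop until
    // the work queue drains; the former single_run special case is handled by
    // the backend's new do_foreground_work() instead.
    void writer_loop() { _backend.thread_loop(); }

    // Called once at the end to flush buffered data and release the
    // backend's worker threads.
    void deactivate() { flush(); _backend.deactivate(); }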
src/hotspot/share/services/heapDumperCompression.cpp
@@ -250,48 +250,30 @@ void CompressionBackend::deactivate() {
     ml.notify_all();
   }
 
-  // Wait for the threads to drain the compression work list.
+  // Wait for the threads to drain the compression work list and do some work yourself.
   while (!_to_compress.is_empty()) {
-    // If we have no threads, compress the current one itself.
-    if (_nr_of_threads == 0) {
-      MutexUnlocker mu(_lock, Mutex::_no_safepoint_check_flag);
-      thread_loop(true);
-    } else {
-      ml.wait();
-    }
+    do_foreground_work();
   }
 
   _active = false;
   ml.notify_all();
 }
 
-void CompressionBackend::thread_loop(bool single_run) {
-  // Register if this is a worker thread.
-  if (!single_run) {
+void CompressionBackend::thread_loop() {
+  {
     MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
     _nr_of_threads++;
   }
 
-  while (true) {
-    WriteWork* work = get_work();
-
-    if (work == NULL) {
-      assert(!single_run, "Should never happen for single thread");
-      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
-      _nr_of_threads--;
-      assert(_nr_of_threads >= 0, "Too many threads finished");
-      ml.notify_all();
-
-      return;
-    } else {
-      do_compress(work);
-      finish_work(work);
-    }
-
-    if (single_run) {
-      return;
-    }
+  WriteWork* work;
+  while ((work = get_work()) != NULL) {
+    do_compress(work);
+    finish_work(work);
   }
+
+  MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
+  _nr_of_threads--;
+  assert(_nr_of_threads >= 0, "Too many threads finished");
 }
 
 void CompressionBackend::set_error(char const* new_error) {
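Read as a whole rather than as interleaved diff lines, the reworked worker loop added above comes out to the following (assembled from the added lines of this hunk; the comments are added here and reflect a reading of the surrounding code, not text from the change):

    void CompressionBackend::thread_loop() {
      {
        // Register this worker while holding the lock.
        MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
        _nr_of_threads++;
      }

      // Drain the queue; get_work() returns NULL once the backend has been
      // deactivated and no more buffers are queued.
      WriteWork* work;
      while ((work = get_work()) != NULL) {
        do_compress(work);
        finish_work(work);
      }

      // Deregister under the lock before the thread exits.
      MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
      _nr_of_threads--;
      assert(_nr_of_threads >= 0, "Too many threads finished");
    }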
@@ -363,6 +345,16 @@ void CompressionBackend::free_work_list(WorkList* list) {
   }
 }
 
+void CompressionBackend::do_foreground_work() {
+  assert(!_to_compress.is_empty(), "Must have work to do");
+  assert(_lock->owned_by_self(), "Must have the lock");
+
+  WriteWork* work = _to_compress.remove_first();
+  MutexUnlocker mu(_lock, Mutex::_no_safepoint_check_flag);
+  do_compress(work);
+  finish_work(work);
+}
+
 WriteWork* CompressionBackend::get_work() {
   MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
 
@@ -405,9 +397,7 @@ void CompressionBackend::get_new_buffer(char** buffer, size_t* used, size_t* max
           _unused.add_first(work);
         }
       } else if (!_to_compress.is_empty() && (_nr_of_threads == 0)) {
-        // If we have no threads, compress the current one itself.
-        MutexUnlocker mu(_lock, Mutex::_no_safepoint_check_flag);
-        thread_loop(true);
+        do_foreground_work();
       } else {
         ml.wait();
       }
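Both call sites of the new helper follow the same pattern: the caller already holds _lock (do_foreground_work() asserts this) and has checked that _to_compress is not empty; the helper then removes one buffer and drops the lock only around the compression itself. An illustrative sketch of that calling pattern, modeled on the deactivate() loop above rather than copied from either call site:

    // Illustrative only: a foreground thread helping to drain the queue.
    MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
    while (!_to_compress.is_empty()) {
      // Removes one WriteWork under the lock, then temporarily releases
      // _lock (via MutexUnlocker) around do_compress()/finish_work().
      do_foreground_work();
    }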
src/hotspot/share/services/heapDumperCompression.hpp
@@ -199,6 +199,7 @@ class CompressionBackend : StackObj {
   void free_work(WriteWork* work);
   void free_work_list(WorkList* list);
 
+  void do_foreground_work();
   WriteWork* get_work();
   void do_compress(WriteWork* work);
   void finish_work(WriteWork* work);
@@ -221,8 +222,8 @@ class CompressionBackend : StackObj {
   // Commits the old buffer (using the value in *used) and sets up a new one.
   void get_new_buffer(char** buffer, size_t* used, size_t* max);
 
-  // The entry point for a worker thread. If single_run is true, we only handle one entry.
-  void thread_loop(bool single_run);
+  // The entry point for a worker thread.
+  void thread_loop();
 
   // Shuts down the backend, releasing all threads.
   void deactivate();
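For reference, the two header hunks leave the affected part of CompressionBackend reading roughly as follows. The private/public placement shown here is an assumption based on where the declarations sit in the hunks; the diff itself does not show the access specifiers.

    class CompressionBackend : StackObj {
      // ... fields and other private helpers elided ...

      // Compress one queued WriteWork on the calling thread (lock held).
      void do_foreground_work();
      WriteWork* get_work();
      void do_compress(WriteWork* work);
      void finish_work(WriteWork* work);

     public:
      // Commits the old buffer (using the value in *used) and sets up a new one.
      void get_new_buffer(char** buffer, size_t* used, size_t* max);

      // The entry point for a worker thread.
      void thread_loop();

      // Shuts down the backend, releasing all threads.
      void deactivate();
    };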
