Commit a10a928
8247179: Mechanism for VM operations to not take part in safepoint coalescing

Reviewed-by: rehn, dholmes
fisk committed Jun 23, 2020
1 parent 88df2f0 commit a10a928
Showing 3 changed files with 1 addition and 73 deletions.
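
For context, "safepoint coalescing" was the VMThread's practice of draining every queued safepoint-priority operation once it had already stopped the world for the first one, so that a single stop-the-world pause was amortized across all pending operations; the code removed below implemented exactly that. What follows is a minimal standalone sketch of the pattern, not HotSpot code: Op, OpQueue, and run_all_coalesced are illustrative names, and the safepoint begin/end calls are elided.

// Sketch of the coalescing pattern this commit removes (illustrative only).
#include <cstdio>
#include <deque>
#include <string>
#include <vector>

struct Op {
  std::string name;
  void evaluate() const { std::printf("evaluating %s\n", name.c_str()); }
};

struct OpQueue {
  std::deque<Op> ops;
  bool empty() const { return ops.empty(); }
  // Analogue of drain_at_safepoint_priority(): hand back everything at once.
  std::vector<Op> drain() {
    std::vector<Op> all(ops.begin(), ops.end());
    ops.clear();
    return all;
  }
};

// One safepoint, many operations: evaluate the first op, then keep draining
// and evaluating whatever else was queued before leaving the safepoint,
// mirroring the re-check in the removed VMThread::loop() code below.
void run_all_coalesced(OpQueue& q) {
  // begin_safepoint();  // stop all Java threads (elided)
  while (!q.empty()) {
    for (const Op& op : q.drain()) {
      op.evaluate();  // may enqueue further ops, hence the outer loop
    }
  }
  // end_safepoint();    // resume Java threads (elided)
}

int main() {
  OpQueue q;
  q.ops.push_back({"heap-inspection"});
  q.ops.push_back({"thread-dump"});
  run_all_coalesced(q);  // evaluates both ops under one notional safepoint
  return 0;
}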
2 changes: 0 additions & 2 deletions src/hotspot/share/runtime/safepoint.cpp
@@ -1181,8 +1181,6 @@ void SafepointTracing::statistics_exit_log() {
     }
   }
 
-  log_info(safepoint, stats)("VM operations coalesced during safepoint " INT64_FORMAT,
-                             VMThread::get_coalesced_count());
   log_info(safepoint, stats)("Maximum sync time " INT64_FORMAT " ns",
                              (int64_t)(_max_sync_time));
   log_info(safepoint, stats)("Maximum vm operation time (except for Exit VM operation) "
68 changes: 1 addition & 67 deletions src/hotspot/share/runtime/vmThread.cpp
@@ -105,29 +105,6 @@ VM_Operation* VMOperationQueue::queue_remove_front(int prio) {
   return r;
 }
 
-VM_Operation* VMOperationQueue::queue_drain(int prio) {
-  if (queue_empty(prio)) return NULL;
-  DEBUG_ONLY(int length = _queue_length[prio];);
-  assert(length >= 0, "sanity check");
-  _queue_length[prio] = 0;
-  VM_Operation* r = _queue[prio]->next();
-  assert(r != _queue[prio], "cannot remove base element");
-  // remove links to base element from head and tail
-  r->set_prev(NULL);
-  _queue[prio]->prev()->set_next(NULL);
-  // restore queue to empty state
-  _queue[prio]->set_next(_queue[prio]);
-  _queue[prio]->set_prev(_queue[prio]);
-  assert(queue_empty(prio), "drain corrupted queue");
-#ifdef ASSERT
-  int len = 0;
-  VM_Operation* cur;
-  for(cur = r; cur != NULL; cur=cur->next()) len++;
-  assert(len == length, "drain lost some ops");
-#endif
-  return r;
-}
-
 //-----------------------------------------------------------------
 // High-level interface
 void VMOperationQueue::add(VM_Operation *op) {
@@ -199,7 +176,6 @@ VMThread* VMThread::_vm_thread = NULL;
 VM_Operation*     VMThread::_cur_vm_operation = NULL;
 VMOperationQueue* VMThread::_vm_queue = NULL;
 PerfCounter*      VMThread::_perf_accumulated_vm_operation_time = NULL;
-uint64_t          VMThread::_coalesced_count = 0;
 VMOperationTimeoutTask* VMThread::_timeout_task = NULL;
 
 
@@ -435,7 +411,6 @@ void VMThread::loop() {
   SafepointSynchronize::init(_vm_thread);
 
   while(true) {
-    VM_Operation* safepoint_ops = NULL;
     //
     // Wait for VM operation
     //
@@ -480,13 +455,6 @@
         }
       }
       _cur_vm_operation = _vm_queue->remove_next();
-
-      // If we are at a safepoint we will evaluate all the operations that
-      // follow that also require a safepoint
-      if (_cur_vm_operation != NULL &&
-          _cur_vm_operation->evaluate_at_safepoint()) {
-        safepoint_ops = _vm_queue->drain_at_safepoint_priority();
-      }
     }
 
     if (should_terminate()) break;
@@ -512,41 +480,7 @@
       }
 
       evaluate_operation(_cur_vm_operation);
-      // now process all queued safepoint ops, iteratively draining
-      // the queue until there are none left
-      do {
-        _cur_vm_operation = safepoint_ops;
-        if (_cur_vm_operation != NULL) {
-          do {
-            EventMark em("Executing coalesced safepoint VM operation: %s", _cur_vm_operation->name());
-            log_debug(vmthread)("Evaluating coalesced safepoint VM operation: %s", _cur_vm_operation->name());
-            // evaluate_operation deletes the op object so we have
-            // to grab the next op now
-            VM_Operation* next = _cur_vm_operation->next();
-            evaluate_operation(_cur_vm_operation);
-            _cur_vm_operation = next;
-            _coalesced_count++;
-          } while (_cur_vm_operation != NULL);
-        }
-        // There is a chance that a thread enqueued a safepoint op
-        // since we released the op-queue lock and initiated the safepoint.
-        // So we drain the queue again if there is anything there, as an
-        // optimization to try and reduce the number of safepoints.
-        // As the safepoint synchronizes us with JavaThreads we will see
-        // any enqueue made by a JavaThread, but the peek will not
-        // necessarily detect a concurrent enqueue by a GC thread, but
-        // that simply means the op will wait for the next major cycle of the
-        // VMThread - just as it would if the GC thread lost the race for
-        // the lock.
-        if (_vm_queue->peek_at_safepoint_priority()) {
-          // must hold lock while draining queue
-          MutexLocker mu_queue(VMOperationQueue_lock,
-                               Mutex::_no_safepoint_check_flag);
-          safepoint_ops = _vm_queue->drain_at_safepoint_priority();
-        } else {
-          safepoint_ops = NULL;
-        }
-      } while(safepoint_ops != NULL);
+      _cur_vm_operation = NULL;
 
       if (_timeout_task != NULL) {
         _timeout_task->disarm();
4 changes: 0 additions & 4 deletions src/hotspot/share/runtime/vmThread.hpp
@@ -64,7 +64,6 @@ class VMOperationQueue : public CHeapObj<mtInternal> {
   bool queue_empty                (int prio);
   void queue_add                  (int prio, VM_Operation *op);
   VM_Operation* queue_remove_front(int prio);
-  VM_Operation* queue_drain(int prio);
   // lock-free query: may return the wrong answer but must not break
   bool queue_peek(int prio) { return _queue_length[prio] > 0; }
 
@@ -74,7 +73,6 @@ class VMOperationQueue : public CHeapObj<mtInternal> {
   // Highlevel operations. Encapsulates policy
   void add(VM_Operation *op);
   VM_Operation* remove_next();   // Returns next or null
-  VM_Operation* drain_at_safepoint_priority() { return queue_drain(SafepointPriority); }
   bool peek_at_safepoint_priority() { return queue_peek(SafepointPriority); }
 };
 
@@ -113,7 +111,6 @@ class VMThread: public NamedThread {
   static bool _terminated;
   static Monitor * _terminate_lock;
   static PerfCounter* _perf_accumulated_vm_operation_time;
-  static uint64_t _coalesced_count;
 
   static VMOperationTimeoutTask* _timeout_task;
 
@@ -148,7 +145,6 @@ class VMThread: public NamedThread {
   // Returns the current vm operation if any.
   static VM_Operation* vm_operation() { return _cur_vm_operation; }
   static VM_Operation::VMOp_Type vm_op_type() { return _cur_vm_operation->type(); }
-  static uint64_t get_coalesced_count() { return _coalesced_count; }
 
   // Returns the single instance of VMThread.
   static VMThread* vm_thread() { return _vm_thread; }
