Skip to content
Permalink
Browse files
8255243: Reinforce escape barrier interactions with ZGC concurrent stack processing

Co-authored-by: Richard Reingruber <rrich@openjdk.org>
Reviewed-by: rrich, sspitsyn
  • Loading branch information
fisk and reinrich committed Oct 29, 2020
1 parent faf23de commit 5b185585e808918b1fdd5eefe05bf506dfabdd22
@@ -1653,17 +1653,9 @@ JvmtiEnv::PopFrame(JavaThread* java_thread) {
// Eagerly reallocate scalar replaced objects.
JavaThread* current_thread = JavaThread::current();
EscapeBarrier eb(true, current_thread, java_thread);
if (eb.barrier_active()) {
if (java_thread->frames_to_pop_failed_realloc() > 0) {
// VM is in the process of popping the top frame because it has scalar replaced objects which
// could not be reallocated on the heap.
// Return JVMTI_ERROR_OUT_OF_MEMORY to avoid interfering with the VM.
return JVMTI_ERROR_OUT_OF_MEMORY;
}
if (!eb.deoptimize_objects(1)) {
// Reallocation of scalar replaced objects failed -> return with error
return JVMTI_ERROR_OUT_OF_MEMORY;
}
if (!eb.deoptimize_objects(1)) {
// Reallocation of scalar replaced objects failed -> return with error
return JVMTI_ERROR_OUT_OF_MEMORY;
}

MutexLocker mu(JvmtiThreadState_lock);
@@ -1378,17 +1378,9 @@ JvmtiEnvBase::force_early_return(JavaThread* java_thread, jvalue value, TosState
// Eagerly reallocate scalar replaced objects.
JavaThread* current_thread = JavaThread::current();
EscapeBarrier eb(true, current_thread, java_thread);
if (eb.barrier_active()) {
if (java_thread->frames_to_pop_failed_realloc() > 0) {
// VM is in the process of popping the top frame because it has scalar replaced objects
// which could not be reallocated on the heap.
// Return JVMTI_ERROR_OUT_OF_MEMORY to avoid interfering with the VM.
return JVMTI_ERROR_OUT_OF_MEMORY;
}
if (!eb.deoptimize_objects(0)) {
// Reallocation of scalar replaced objects failed -> return with error
return JVMTI_ERROR_OUT_OF_MEMORY;
}
if (!eb.deoptimize_objects(0)) {
// Reallocation of scalar replaced objects failed -> return with error
return JVMTI_ERROR_OUT_OF_MEMORY;
}

SetForceEarlyReturn op(state, value, tos);
@@ -628,58 +628,12 @@ static bool can_be_deoptimized(vframe* vf) {
return (vf->is_compiled_frame() && vf->fr().can_be_deoptimized());
}

// Revert optimizations based on escape analysis if this is an access to a local object
bool VM_GetOrSetLocal::deoptimize_objects(javaVFrame* jvf) {
#if COMPILER2_OR_JVMCI
assert(_type == T_OBJECT, "EscapeBarrier should not be active if _type != T_OBJECT");
if (_depth < _thread->frames_to_pop_failed_realloc()) {
// cannot access frame with failed reallocations
bool VM_GetOrSetLocal::doit_prologue() {
if (!_eb.deoptimize_objects(_depth, _depth)) {
// The target frame is affected by a reallocation failure.
_result = JVMTI_ERROR_OUT_OF_MEMORY;
return false;
}
if (can_be_deoptimized(jvf)) {
compiledVFrame* cf = compiledVFrame::cast(jvf);
if (cf->has_ea_local_in_scope() && !_eb.deoptimize_objects(cf->fr().id())) {
// reallocation of scalar replaced objects failed because heap is exhausted
_result = JVMTI_ERROR_OUT_OF_MEMORY;
return false;
}
}

// With this access the object could escape the thread changing its escape state from ArgEscape,
// to GlobalEscape so we must deoptimize callers which could have optimized on the escape state.
vframe* vf = jvf;
do {
// move to next physical frame
while(!vf->is_top()) {
vf = vf->sender();
}
vf = vf->sender();

if (vf != NULL && vf->is_compiled_frame()) {
compiledVFrame* cvf = compiledVFrame::cast(vf);
// Deoptimize objects if arg escape is being passed down the stack.
// Note that deoptimizing the frame is not enough because objects need to be relocked
if (cvf->arg_escape() && !_eb.deoptimize_objects(cvf->fr().id())) {
// reallocation of scalar replaced objects failed because heap is exhausted
_result = JVMTI_ERROR_OUT_OF_MEMORY;
return false;
}
}
} while(vf != NULL && !vf->is_entry_frame());
#endif // COMPILER2_OR_JVMCI
return true;
}

// Prologue of the VM operation, run in the calling thread before the
// safepoint operation executes. With an active escape barrier, eagerly
// reallocate scalar replaced objects in the accessed frame; on a
// reallocation failure deoptimize_objects() sets _result to
// JVMTI_ERROR_OUT_OF_MEMORY and the operation is cancelled by returning false.
bool VM_GetOrSetLocal::doit_prologue() {
  if (!_eb.barrier_active()) {
    return true;
  }
  _jvf = get_java_vframe();
  NULL_CHECK(_jvf, false);
  return deoptimize_objects(_jvf);
}
@@ -334,7 +334,6 @@ class VM_GetOrSetLocal : public VM_Operation {
javaVFrame* get_java_vframe();
bool check_slot_type_lvt(javaVFrame* vf);
bool check_slot_type_no_lvt(javaVFrame* vf);
bool deoptimize_objects(javaVFrame* vf);

public:
// Constructor for non-object getter
@@ -218,11 +218,6 @@ static bool eliminate_allocations(JavaThread* thread, int exec_mode, CompiledMet
Thread* THREAD = thread;
// Clear pending OOM if reallocation fails and return true indicating allocation failure
realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
// Make sure the deoptee frame gets processed after a potential safepoint during
// object reallocation. This is necessary because (a) deoptee_thread can be
// different from the current thread and (b) the deoptee frame does not need to be
// the top frame.
StackWatermarkSet::finish_processing(deoptee_thread, NULL /* context */, StackWatermarkKind::gc);
deoptimized_objects = true;
} else {
JRT_BLOCK
@@ -33,6 +33,7 @@
#include "runtime/handles.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/keepStackGCProcessed.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/stackValue.hpp"
@@ -62,29 +63,37 @@ bool EscapeBarrier::objs_are_deoptimized(JavaThread* thread, intptr_t* fr_id) {
return result;
}

// Object references of frames up to the given depth are about to be
// accessed. Frames with optimizations based on escape state that is potentially
// changed by the accesses need to be deoptimized and the referenced objects
// need to be reallocated and relocked. Up to depth this is done for frames
// with not escaping objects in scope. For deeper frames it is done only if
// they pass not escaping objects as arguments because they potentially escape
// from callee frames within the given depth.
// The search for deeper frames is ended if an entry frame is found because
// arguments to native methods are considered to escape globally.
bool EscapeBarrier::deoptimize_objects(int depth) {
if (barrier_active() && deoptee_thread()->has_last_Java_frame()) {
// Deoptimize objects of frames of the target thread at depth >= d1 and depth <= d2.
// Deoptimize objects of caller frames if they passed references to ArgEscape objects as arguments.
// Return false in the case of a reallocation failure and true otherwise.
bool EscapeBarrier::deoptimize_objects(int d1, int d2) {
if (!barrier_active()) return true;
if (d1 < deoptee_thread()->frames_to_pop_failed_realloc()) {
// The deoptee thread has frames with reallocation failures on top of its stack.
// These frames are about to be removed. We must not interfere with that and signal failure.
return false;
}
if (deoptee_thread()->has_last_Java_frame()) {
assert(calling_thread() == Thread::current(), "should be");
KeepStackGCProcessedMark ksgcpm(deoptee_thread());
ResourceMark rm(calling_thread());
HandleMark hm(calling_thread());
RegisterMap reg_map(deoptee_thread(), false /* update_map */, false /* process_frames */);
vframe* vf = deoptee_thread()->last_java_vframe(&reg_map);
int cur_depth = 0;
while (vf != NULL && ((cur_depth <= depth) || !vf->is_entry_frame())) {

// Skip frames at depth < d1
while (vf != NULL && cur_depth < d1) {
cur_depth++;
vf = vf->sender();
}

while (vf != NULL && ((cur_depth <= d2) || !vf->is_entry_frame())) {
if (vf->is_compiled_frame()) {
compiledVFrame* cvf = compiledVFrame::cast(vf);
// Deoptimize frame and local objects if any exist.
// If cvf is deeper than depth, then we deoptimize iff local objects are passed as args.
bool should_deopt = cur_depth <= depth ? cvf->has_ea_local_in_scope() : cvf->arg_escape();
bool should_deopt = cur_depth <= d2 ? cvf->has_ea_local_in_scope() : cvf->arg_escape();
if (should_deopt && !deoptimize_objects(cvf->fr().id())) {
// reallocation of scalar replaced objects failed because heap is exhausted
return false;
@@ -109,7 +118,13 @@ bool EscapeBarrier::deoptimize_objects_all_threads() {
if (!barrier_active()) return true;
ResourceMark rm(calling_thread());
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
if (jt->frames_to_pop_failed_realloc() > 0) {
// The deoptee thread jt has frames with reallocation failures on top of its stack.
// These frames are about to be removed. We must not interfere with that and signal failure.
return false;
}
if (jt->has_last_Java_frame()) {
KeepStackGCProcessedMark ksgcpm(jt);
RegisterMap reg_map(jt, false /* update_map */, false /* process_frames */);
vframe* vf = jt->last_java_vframe(&reg_map);
assert(jt->frame_anchor()->walkable(),
@@ -297,7 +312,7 @@ static void set_objs_are_deoptimized(JavaThread* thread, intptr_t* fr_id) {
// frame is replaced with interpreter frames. Returns false iff at least one
// reallocation failed.
bool EscapeBarrier::deoptimize_objects_internal(JavaThread* deoptee, intptr_t* fr_id) {
if (!barrier_active()) return true;
assert(barrier_active(), "should not call");

JavaThread* ct = calling_thread();
bool realloc_failures = false;
@@ -307,11 +322,7 @@ bool EscapeBarrier::deoptimize_objects_internal(JavaThread* deoptee, intptr_t* f
compiledVFrame* last_cvf;
bool fr_is_deoptimized;
do {
if (!self_deopt()) {
// Process stack of deoptee thread as we will access oops during object deoptimization.
StackWatermarkSet::start_processing(deoptee, StackWatermarkKind::gc);
}
StackFrameStream fst(deoptee, true /* update */, true /* process_frames */);
StackFrameStream fst(deoptee, true /* update */, false /* process_frames */);
while (fst.current()->id() != fr_id && !fst.is_done()) {
fst.next();
}
@@ -61,6 +61,12 @@ class EscapeBarrier : StackObj {
// Deoptimize the given frame and deoptimize objects with optimizations based on escape analysis.
bool deoptimize_objects_internal(JavaThread* deoptee, intptr_t* fr_id);

// Deoptimize objects, i.e. reallocate and relock them. The target frames are deoptimized.
// The methods return false iff at least one reallocation failed.
// Convenience wrapper: operates on the frame identified by fr_id on this
// barrier's deoptee thread by forwarding to deoptimize_objects_internal().
bool deoptimize_objects(intptr_t* fr_id) {
return deoptimize_objects_internal(deoptee_thread(), fr_id);
}

public:
// Revert ea based optimizations for given deoptee thread
EscapeBarrier(bool barrier_active, JavaThread* calling_thread, JavaThread* deoptee_thread)
@@ -89,13 +95,17 @@ class EscapeBarrier : StackObj {
bool barrier_active() const { return false; }
#endif // COMPILER2_OR_JVMCI

// Deoptimize objects, i.e. reallocate and relock them. The target frames are deoptimized.
// The methods return false iff at least one reallocation failed.
bool deoptimize_objects(intptr_t* fr_id) {
return true COMPILER2_OR_JVMCI_PRESENT(&& deoptimize_objects_internal(deoptee_thread(), fr_id));
// Deoptimize objects of frames of the target thread up to the given depth.
// Deoptimize objects of caller frames if they passed references to ArgEscape objects as arguments.
// Return false in the case of a reallocation failure and true otherwise.
bool deoptimize_objects(int depth) {
return deoptimize_objects(0, depth);
}

bool deoptimize_objects(int depth) NOT_COMPILER2_OR_JVMCI_RETURN_(true);
// Deoptimize objects of frames of the target thread at depth >= d1 and depth <= d2.
// Deoptimize objects of caller frames if they passed references to ArgEscape objects as arguments.
// Return false in the case of a reallocation failure and true otherwise.
bool deoptimize_objects(int d1, int d2) NOT_COMPILER2_OR_JVMCI_RETURN_(true);

// Find and deoptimize non escaping objects and the holding frames on all stacks.
bool deoptimize_objects_all_threads() NOT_COMPILER2_OR_JVMCI_RETURN_(true);
@@ -0,0 +1,60 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stackWatermark.inline.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/keepStackGCProcessed.hpp"

// Keep the stack of the remote thread jt GC-processed while this mark is in
// scope. First forces jt's stack to be fully processed, then links the current
// Java thread's GC stack watermark to jt's watermark so jt's frames stay
// processed across subsequent safepoint polls.
KeepStackGCProcessedMark::KeepStackGCProcessedMark(JavaThread* jt) :
    _active(true),
    _jt(jt) {
  finish_processing();
  Thread* current = Thread::current();
  if (!current->is_Java_thread()) {
    // Non-Java callers are only legal for the VM thread inside a safepoint;
    // no watermark linking is needed (or possible) in that case.
    assert(SafepointSynchronize::is_at_safepoint() && current->is_VM_thread(),
           "must be either Java thread or VM thread in a safepoint");
    _active = false;
    return;
  }
  StackWatermark* my_watermark = StackWatermarkSet::get(JavaThread::current(), StackWatermarkKind::gc);
  if (my_watermark == NULL) {
    // No GC stack watermark is installed for the current thread, so there is
    // nothing to link; deactivate so the destructor does not unlink.
    _active = false;
    return;
  }
  my_watermark->link_watermark(StackWatermarkSet::get(jt, StackWatermarkKind::gc));
}

// Undo the watermark linking established by the constructor, if any.
KeepStackGCProcessedMark::~KeepStackGCProcessedMark() {
  if (_active) {
    StackWatermark* my_watermark = StackWatermarkSet::get(JavaThread::current(), StackWatermarkKind::gc);
    my_watermark->link_watermark(NULL);
  }
}

// Make sure the remote thread's (_jt's) stack has been fully processed by the
// GC stack watermark machinery before we start examining its frames.
void KeepStackGCProcessedMark::finish_processing() {
StackWatermarkSet::finish_processing(_jt, NULL /* context */, StackWatermarkKind::gc);
}
@@ -0,0 +1,49 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_RUNTIME_KEEPSTACKGCPROCESSED_HPP
#define SHARE_RUNTIME_KEEPSTACKGCPROCESSED_HPP

#include "memory/allocation.hpp"
#include "runtime/stackWatermark.hpp"
#include "runtime/stackWatermarkKind.hpp"
#include "runtime/stackWatermarkSet.hpp"

// Use this class to mark a remote thread whose entire stack you are
// currently interested in examining, without it slipping into an
// unprocessed state at safepoint polls.
// Scoped (StackObj) mark: the constructor links the current thread's GC stack
// watermark to the target thread's; the destructor unlinks it again.
class KeepStackGCProcessedMark : public StackObj {
friend class StackWatermark;
bool _active;     // false if linking was unnecessary or impossible; skips unlink in the destructor
JavaThread* _jt;  // remote thread whose stack is kept GC-processed

// Ensure _jt's stack is fully processed before linking watermarks.
void finish_processing();

public:
KeepStackGCProcessedMark(JavaThread* jt);
~KeepStackGCProcessedMark();
};


#endif // SHARE_RUNTIME_KEEPSTACKGCPROCESSED_HPP
@@ -83,13 +83,13 @@ void SafepointMechanism::process(JavaThread *thread) {
SafepointSynchronize::block(thread); // Recursive
}

// The call to start_processing fixes the thread's oops and the first few frames.
// The call to on_safepoint fixes the thread's oops and the first few frames.
//
// The call has been carefully placed here to cater for a few situations:
// 1) After we exit from block after a global poll
// 2) After a thread races with the disarming of the global poll and transitions from native/blocked
// 3) Before the handshake code is run
StackWatermarkSet::start_processing(thread, StackWatermarkKind::gc);
StackWatermarkSet::on_safepoint(thread);

if (thread->handshake_state()->should_process()) {
thread->handshake_state()->process_by_self();
Loading

1 comment on commit 5b18558

@bridgekeeper

This comment has been minimized.

Copy link

@bridgekeeper bridgekeeper bot commented on 5b18558 Oct 29, 2020

Please sign in to comment.