8257422: [lworld] Problems with scalarized inline type return and incremental inlining
TobiHartmann committed Dec 2, 2020
1 parent 8efeb68 commit 8680152416550cf26a263ba48dc19cfe26bd77bf
Showing 5 changed files with 93 additions and 31 deletions.
@@ -478,16 +478,19 @@ void LateInlineCallGenerator::do_late_inline() {
     return;
   }
 
-  // Allocate a buffer for the returned InlineTypeNode because the caller expects an oop return.
-  // Do this before the method handle call in case the buffer allocation triggers deoptimization.
+  // Check if we are late inlining a method handle call that returns an inline type as fields.
   Node* buffer_oop = NULL;
-  if (is_mh_late_inline() && _inline_cg->method()->return_type()->is_inlinetype()) {
+  ciType* mh_rt = _inline_cg->method()->return_type();
+  if (is_mh_late_inline() && mh_rt->is_inlinetype() && mh_rt->as_inline_klass()->can_be_returned_as_fields()) {
+    // Allocate a buffer for the inline type returned as fields because the caller expects an oop return.
+    // Do this before the method handle call in case the buffer allocation triggers deoptimization and
+    // we need to "re-execute" the call in the interpreter (to make sure the call is only executed once).
     GraphKit arg_kit(jvms, &gvn);
     {
       PreserveReexecuteState preexecs(&arg_kit);
       arg_kit.jvms()->set_should_reexecute(true);
       arg_kit.inc_sp(nargs);
-      Node* klass_node = arg_kit.makecon(TypeKlassPtr::make(_inline_cg->method()->return_type()->as_inline_klass()));
+      Node* klass_node = arg_kit.makecon(TypeKlassPtr::make(mh_rt->as_inline_klass()));
       buffer_oop = arg_kit.new_instance(klass_node, NULL, NULL, /* deoptimize_on_exception */ true);
     }
     jvms = arg_kit.transfer_exceptions_into_jvms();
@@ -521,22 +524,30 @@ void LateInlineCallGenerator::do_late_inline() {
   C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
 
   // Handle inline type returns
-  bool returned_as_fields = call->tf()->returns_inline_type_as_fields();
-  if (result->is_InlineType()) {
-    // Only possible if is_mh_late_inline() when the callee does not "know" that the caller expects an oop
-    assert(is_mh_late_inline() && !returned_as_fields, "sanity");
-    assert(buffer_oop != NULL, "should have allocated a buffer");
-    InlineTypeNode* vt = result->as_InlineType();
-    vt->store(&kit, buffer_oop, buffer_oop, vt->type()->inline_klass(), 0);
-    // Do not let stores that initialize this buffer be reordered with a subsequent
-    // store that would make this buffer accessible by other threads.
-    AllocateNode* alloc = AllocateNode::Ideal_allocation(buffer_oop, &kit.gvn());
-    assert(alloc != NULL, "must have an allocation node");
-    kit.insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
-    result = buffer_oop;
-  } else if (result->is_InlineTypePtr() && returned_as_fields) {
-    result->as_InlineTypePtr()->replace_call_results(&kit, call, C);
+  InlineTypeNode* vt = result->isa_InlineType();
+  if (vt != NULL) {
+    if (call->tf()->returns_inline_type_as_fields()) {
+      vt->replace_call_results(&kit, call, C);
+    } else {
+      // Only possible with is_mh_late_inline() when the callee does not "know" that the caller expects an oop
+      assert(is_mh_late_inline(), "sanity");
+      assert(!result->isa_InlineType()->is_allocated(&kit.gvn()), "already allocated");
+      assert(buffer_oop != NULL, "should have allocated a buffer");
+      vt->store(&kit, buffer_oop, buffer_oop, vt->type()->inline_klass());
+      // Do not let stores that initialize this buffer be reordered with a subsequent
+      // store that would make this buffer accessible by other threads.
+      AllocateNode* alloc = AllocateNode::Ideal_allocation(buffer_oop, &kit.gvn());
+      assert(alloc != NULL, "must have an allocation node");
+      kit.insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
+      // Convert to InlineTypePtrNode to keep track of field values
+      kit.gvn().hash_delete(vt);
+      vt->set_oop(buffer_oop);
+      DEBUG_ONLY(buffer_oop = NULL);
+      vt = kit.gvn().transform(vt)->as_InlineType();
+      result = vt->as_ptr(&kit.gvn());
+    }
   }
+  assert(buffer_oop == NULL, "unused buffer allocation");
 
   kit.replace_call(call, result, true);
 }
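
For readers unfamiliar with the scenario these two hunks handle, the sketch below shows the kind of call shape involved: a method handle whose target returns an inline type. When the target is late-inlined, its result can be a scalarized InlineTypeNode, but the method handle call site still expects an oop, which is what the pre-allocated buffer above is for. This is only an illustration: it assumes a Valhalla lworld build that accepts the inline class syntax, and the class and method names are made up rather than taken from this change.

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class MHInlineReturn {
    // Illustrative inline type (hypothetical, not part of this change).
    static inline class Point {
        final int x;
        final int y;
        Point(int x, int y) { this.x = x; this.y = y; }
    }

    static Point makePoint() {
        return new Point(1, 2);
    }

    public static void main(String[] args) throws Throwable {
        // The invokeExact call site returns an oop (a buffered Point), even if the
        // late-inlined target produced its result as scalarized fields.
        MethodHandle mh = MethodHandles.lookup().findStatic(
                MHInlineReturn.class, "makePoint", MethodType.methodType(Point.class));
        Point p = (Point) mh.invokeExact();
        System.out.println(p.x + p.y);
    }
}
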
@@ -1804,7 +1804,8 @@ Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAr
 void GraphKit::set_arguments_for_java_call(CallJavaNode* call, bool is_late_inline) {
   PreserveReexecuteState preexecs(this);
   if (EnableValhalla) {
-    // Make sure the call is re-executed, if buffering of inline type arguments triggers deoptimization
+    // Make sure the call is "re-executed", if buffering of inline type arguments triggers deoptimization.
+    // At this point, the call hasn't been executed yet, so we will only ever execute the call once.
     jvms()->set_should_reexecute(true);
     int arg_size = method()->get_declared_signature_at_bci(bci())->arg_size_for_bc(java_bc());
     inc_sp(arg_size);
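
As a rough illustration of why the call is flagged for re-execution while its arguments are set up (again assuming a Valhalla lworld build; the names are illustrative, not from this change): when a scalarized inline type is passed where an oop is expected, C2 has to buffer it on the heap right before the call, and that allocation may deoptimize before the call itself has run.

public class ArgumentBuffering {
    // Hypothetical inline type used only for illustration.
    static inline class Point {
        final int x;
        final int y;
        Point(int x, int y) { this.x = x; this.y = y; }
    }

    // The parameter type is Object, so a scalarized Point argument must be buffered
    // (heap-allocated) at the call site before the invocation.
    static int consume(Object o) {
        return ((Point) o).x;
    }

    static int caller() {
        Point p = new Point(3, 4);   // typically stays scalarized in compiled code
        return consume(p);           // buffering happens here; if the allocation
                                     // deoptimizes, the not-yet-executed call is
                                     // simply re-executed in the interpreter
    }

    public static void main(String[] args) {
        System.out.println(caller());
    }
}
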
@@ -393,7 +393,7 @@ InlineTypePtrNode* InlineTypeBaseNode::buffer(GraphKit* kit, bool safe_for_repla
   ciInlineKlass* vk = inline_klass();
   Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
   Node* alloc_oop = kit->new_instance(klass_node, NULL, NULL, /* deoptimize_on_exception */ true, this);
-  store(kit, alloc_oop, alloc_oop, vk, 0);
+  store(kit, alloc_oop, alloc_oop, vk);
 
   // Do not let stores that initialize this buffer be reordered with a subsequent
   // store that would make this buffer accessible by other threads.
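
The comment above is about safe publication of a freshly allocated buffer. The sketch below (illustrative only; assumes a Valhalla lworld build, names are made up) shows the Java-level invariant the barrier preserves: even when a buffered inline type is handed to another thread through a plain, racy field, the reader should never observe a half-initialized buffer.

public class BufferPublication {
    static inline class Point {
        final int x;
        final int y;
        Point(int x, int y) { this.x = x; this.y = y; }
    }

    // Deliberately non-volatile: the oop is published via a plain store (a data race).
    static Object shared;

    public static void main(String[] args) throws InterruptedException {
        Thread reader = new Thread(() -> {
            // Bounded spin so the example always terminates, even if the racy read
            // never observes the publication.
            for (long i = 0; i < 1_000_000_000L; i++) {
                Object o = shared;
                if (o != null) {
                    // With the StoreStore barrier between the buffer-initializing stores
                    // and the publishing store, x and y are always seen as set here.
                    Point p = (Point) o;
                    System.out.println(p.x + p.y);
                    return;
                }
            }
        });
        reader.start();
        shared = new Point(5, 6);   // buffer the inline type and publish the oop
        reader.join();
    }
}
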
@@ -445,7 +445,7 @@ InlineTypePtrNode* InlineTypeBaseNode::as_ptr(PhaseGVN* phase) const {
 }
 
 // When a call returns multiple values, it has several result
-// projections, one per field. Replacing the result of the call by a
+// projections, one per field. Replacing the result of the call by an
 // inline type node (after late inlining) requires that for each result
 // projection, we find the corresponding inline type field.
 void InlineTypeBaseNode::replace_call_results(GraphKit* kit, Node* call, Compile* C) {
@@ -622,7 +622,7 @@ InlineTypeNode* InlineTypeNode::make_larval(GraphKit* kit, bool allocate) const
     AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop, &kit->gvn());
     alloc->_larval = true;
 
-    store(kit, alloc_oop, alloc_oop, vk, 0);
+    store(kit, alloc_oop, alloc_oop, vk);
     res->set_oop(alloc_oop);
   }
   res->set_type(TypeInlineType::make(vk, true));
@@ -823,9 +823,9 @@ void Parse::build_exits() {
     if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
       ret_type = TypeOopPtr::BOTTOM;
     }
-    if ((_caller->has_method() || tf()->returns_inline_type_as_fields()) &&
+    // Scalarize inline type when returning as fields or inlining non-incrementally
+    if ((tf()->returns_inline_type_as_fields() || (_caller->has_method() && !Compile::current()->inlining_incrementally())) &&
         ret_type->is_inlinetypeptr() && ret_type->inline_klass()->is_scalarizable() && !ret_type->maybe_null()) {
-      // Scalarize inline type return when inlining or with multiple return values
       ret_type = TypeInlineType::make(ret_type->inline_klass());
     }
     int ret_size = type2size[ret_type->basic_type()];
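
To make the build_exits() condition above more concrete, here is a tiny sketch (illustrative, assuming a Valhalla lworld build; the Point type is hypothetical) of what a scalarized inline type return means at the Java level: when callee() is inlined non-incrementally, or its value is returned as fields, the Point never needs to exist as a heap object in the caller.

public class ScalarizedReturn {
    // Hypothetical inline type, not part of this change.
    static inline class Point {
        final int x;
        final int y;
        Point(int x, int y) { this.x = x; this.y = y; }
    }

    static Point callee(int x, int y) {
        return new Point(x, y);
    }

    static int caller(int x, int y) {
        // With the return scalarized, x and y flow into the caller as plain ints
        // and no Point buffer has to be allocated.
        return callee(x, y).x + callee(x, y).y;
    }

    public static void main(String[] args) {
        System.out.println(caller(1, 2));
    }
}
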
@@ -2339,13 +2339,14 @@ void Parse::return_current(Node* value) {
   Node* phi = _exits.argument(0);
   const Type* return_type = phi->bottom_type();
   const TypeOopPtr* tr = return_type->isa_oopptr();
-  if (return_type->isa_inlinetype() && !Compile::current()->inlining_incrementally()) {
+  // The return_type is set in Parse::build_exits().
+  if (return_type->isa_inlinetype()) {
     // Inline type is returned as fields, make sure it is scalarized
     if (!value->is_InlineType()) {
       value = InlineTypeNode::make_from_oop(this, value, return_type->inline_klass());
     }
-    if (!_caller->has_method()) {
-      // Inline type is returned as fields from root method, make sure all non-flattened
+    if (!_caller->has_method() || Compile::current()->inlining_incrementally()) {
+      // Returning from root or an incrementally inlined method. Make sure all non-flattened
       // fields are buffered and re-execute if allocation triggers deoptimization.
       PreserveReexecuteState preexecs(this);
       assert(tf()->returns_inline_type_as_fields(), "must be returned as fields");
@@ -2360,9 +2361,6 @@ void Parse::return_current(Node* value) {
     jvms()->set_should_reexecute(true);
     inc_sp(1);
     value = value->as_InlineType()->buffer(this);
-    if (Compile::current()->inlining_incrementally()) {
-      value = value->as_InlineTypeBase()->allocate_fields(this);
-    }
   } else if (tr && tr->isa_instptr() && tr->klass()->is_loaded() && tr->klass()->is_interface()) {
     // If returning oops to an interface-return, there is a silent free
     // cast from oop to interface allowed by the Verifier. Make it explicit here.
@@ -1035,4 +1035,56 @@ public void test48_verifier(boolean warmup) {
         MyValueEmpty empty = test48(EmptyContainer.default);
         Asserts.assertEquals(empty, MyValueEmpty.default);
     }
+
+    // Test conditional inline type return with incremental inlining
+    public MyValue3 test49_inlined1(boolean b) {
+        if (b) {
+            return MyValue3.create();
+        } else {
+            return MyValue3.create();
+        }
+    }
+
+    public MyValue3 test49_inlined2(boolean b) {
+        return test49_inlined1(b);
+    }
+
+    @Test
+    public void test49(boolean b) {
+        test49_inlined2(b);
+    }
+
+    @DontCompile
+    public void test49_verifier(boolean warmup) {
+        test49(true);
+        test49(false);
+    }
+
+    // Variant of test49 with result verification (triggered different failure mode)
+    final MyValue3 test50_vt = MyValue3.create();
+    final MyValue3 test50_vt2 = test50_vt;
+
+    public MyValue3 test50_inlined1(boolean b) {
+        if (b) {
+            return test50_vt;
+        } else {
+            return test50_vt2;
+        }
+    }
+
+    public MyValue3 test50_inlined2(boolean b) {
+        return test50_inlined1(b);
+    }
+
+    @Test
+    public void test50(boolean b) {
+        MyValue3 vt = test50_inlined2(b);
+        test50_vt.verify(vt);
+    }
+
+    @DontCompile
+    public void test50_verifier(boolean warmup) {
+        test50(true);
+        test50(false);
+    }
 }
