Skip to content
Permalink
Browse files
8261874: [lworld] Non-flattened array blocks scalarization of inline type
  • Loading branch information
TobiHartmann committed Feb 19, 2021
1 parent e31335f commit e88ff83c8bc527b417a2afb76f02125a198a46bc
Showing 7 changed files with 151 additions and 49 deletions.
@@ -988,6 +988,7 @@ void Compile::Init(int aliaslevel) {
_loop_opts_cnt = LoopOptsCount;
_has_flattened_accesses = false;
_flattened_accesses_share_alias = true;
_scalarize_in_safepoints = false;

set_do_inlining(Inline);
set_max_inline_size(MaxInlineSize);
@@ -1954,26 +1955,10 @@ static bool return_val_keeps_allocations_alive(Node* ret_val) {
}

void Compile::process_inline_types(PhaseIterGVN &igvn, bool remove) {
// Make inline types scalar in safepoints
for (int i = _inline_type_nodes.length()-1; i >= 0; i--) {
InlineTypeBaseNode* vt = _inline_type_nodes.at(i)->as_InlineTypeBase();
vt->make_scalar_in_safepoints(&igvn);
}
if (remove) {
// Remove inline type nodes
while (_inline_type_nodes.length() > 0) {
InlineTypeBaseNode* vt = _inline_type_nodes.pop()->as_InlineTypeBase();
if (vt->outcnt() == 0) {
igvn.remove_dead_node(vt);
} else if (vt->is_InlineTypePtr()) {
igvn.replace_node(vt, vt->get_oop());
} else {
igvn.replace_node(vt, igvn.C->top());
}
}
if (_inline_type_nodes.length() == 0) {
return;
}
// TODO only check once we are removing, right?
// Make sure that the return value does not keep an unused allocation alive
// Make sure that the return value does not keep an otherwise unused allocation alive
if (tf()->returns_inline_type_as_fields()) {
Node* ret = NULL;
for (uint i = 1; i < root()->req(); i++){
@@ -1993,6 +1978,31 @@ void Compile::process_inline_types(PhaseIterGVN &igvn, bool remove) {
}
}
}
if (remove) {
// Remove inline type nodes
while (_inline_type_nodes.length() > 0) {
InlineTypeBaseNode* vt = _inline_type_nodes.pop()->as_InlineTypeBase();
if (vt->outcnt() == 0) {
igvn.remove_dead_node(vt);
} else if (vt->is_InlineTypePtr()) {
igvn.replace_node(vt, vt->get_oop());
} else {
#ifdef ASSERT
for (DUIterator_Fast imax, i = vt->fast_outs(imax); i < imax; i++) {
assert(vt->fast_out(i)->is_InlineTypeBase(), "Unexpected inline type user");
}
#endif
igvn.replace_node(vt, igvn.C->top());
}
}
} else {
// Give inline types a chance to be scalarized in safepoints
// Delay this until all inlining is over to avoid getting inconsistent debug info
set_scalarize_in_safepoints(true);
for (int i = _inline_type_nodes.length()-1; i >= 0; i--) {
igvn._worklist.push(_inline_type_nodes.at(i));
}
}
igvn.optimize();
}

@@ -2627,10 +2637,8 @@ void Compile::Optimize() {
set_for_igvn(save_for_igvn);
}

if (_inline_type_nodes.length() > 0) {
// Do this once all inlining is over to avoid getting inconsistent debug info
process_inline_types(igvn);
}
// Process inline type nodes now that all inlining is over
process_inline_types(igvn);

adjust_flattened_array_access_aliases(igvn);

@@ -2737,11 +2745,8 @@ void Compile::Optimize() {
bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
#endif

if (_inline_type_nodes.length() > 0) {
// Process inline type nodes again and remove them. From here
// on we don't need to keep track of field values anymore.
process_inline_types(igvn, /* remove= */ true);
}
// Process inline type nodes again after loop opts
process_inline_types(igvn);

{
TracePhase tp("macroExpand", &timers[_t_macroExpand]);
@@ -2753,6 +2758,10 @@ void Compile::Optimize() {
print_method(PHASE_MACRO_EXPANSION, 2);
}

// Process inline type nodes again and remove them. From here
// on we don't need to keep track of field values anymore.
process_inline_types(igvn, /* remove= */ true);

{
TracePhase tp("barrierExpand", &timers[_t_barrierExpand]);
if (bs->expand_barriers(this, igvn)) {
@@ -307,6 +307,7 @@ class Compile : public Phase {
bool _clinit_barrier_on_entry; // True if clinit barrier is needed on nmethod entry
bool _has_flattened_accesses; // Any known flattened array accesses?
bool _flattened_accesses_share_alias; // Initially all flattened array share a single slice
bool _scalarize_in_safepoints; // Scalarize inline types in safepoint debug info
uint _stress_seed; // Seed for stress testing

// Compilation environment.
@@ -602,6 +603,8 @@ class Compile : public Phase {
void set_flattened_accesses() { _has_flattened_accesses = true; }
bool flattened_accesses_share_alias() const { return _flattened_accesses_share_alias; }
void set_flattened_accesses_share_alias(bool z) { _flattened_accesses_share_alias = z; }
bool scalarize_in_safepoints() const { return _scalarize_in_safepoints; }
void set_scalarize_in_safepoints(bool z) { _scalarize_in_safepoints = z; }

// Support for scalarized inline type calling convention
bool has_scalarized_args() const { return _method != NULL && _method->has_scalarized_args(); }
@@ -210,7 +210,7 @@ int InlineTypeBaseNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node
for (uint j = 0; j < nfields; ++j) {
int offset = vk->nonstatic_field_at(j)->offset();
Node* value = field_value_by_offset(offset, true /* include flattened inline type fields */);
if (value->is_InlineType()) {
if (value->is_InlineTypeBase()) {
// Add inline type field to the worklist to process later
worklist.push(value);
}
@@ -241,10 +241,12 @@ void InlineTypeBaseNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allo
}
// Now scalarize non-flattened fields
for (uint i = 0; i < worklist.size(); ++i) {
Node* vt = worklist.at(i);
vt->as_InlineType()->make_scalar_in_safepoints(igvn);
InlineTypeBaseNode* vt = worklist.at(i)->isa_InlineTypeBase();
vt->make_scalar_in_safepoints(igvn);
}
if (outcnt() == 0) {
igvn->remove_dead_node(this);
}
igvn->record_for_igvn(this);
}

const TypePtr* InlineTypeBaseNode::field_adr_type(Node* base, int offset, ciInstanceKlass* holder, DecoratorSet decorators, PhaseGVN& gvn) const {
@@ -496,6 +498,14 @@ Node* InlineTypeBaseNode::allocate_fields(GraphKit* kit) {
return vt;
}

// Ideal transformation hook for inline type nodes. Once Compile has decided
// that inline types may be scalarized in safepoint debug info (the
// _scalarize_in_safepoints flag is set after all inlining is over, to avoid
// inconsistent debug info), eagerly rewrite the safepoints that use this node.
// Always returns NULL: the node itself is never replaced here; only its
// safepoint users are rewritten.
Node* InlineTypeBaseNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  if (phase->C->scalarize_in_safepoints() && can_reshape) {
    // can_reshape is only true during iterative GVN, so this cast is expected
    // to succeed — NOTE(review): relies on the standard C2 Ideal contract.
    PhaseIterGVN* igvn = phase->is_IterGVN();
    make_scalar_in_safepoints(igvn);
  }
  return NULL;
}

InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk) {
// Create a new InlineTypeNode with uninitialized values and NULL oop
Node* oop = vk->is_empty() ? default_oop(gvn, vk) : gvn.zerocon(T_INLINE_TYPE);
@@ -861,7 +871,7 @@ Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
}
}
}
return NULL;
return InlineTypeBaseNode::Ideal(phase, can_reshape);
}

// Search for multiple allocations of this inline type and try to replace them by dominating allocations.
@@ -893,7 +903,7 @@ void InlineTypeNode::remove_redundant_allocations(PhaseIterGVN* igvn, PhaseIdeal
replace_allocation(igvn, res, res_dom);
// The result of the dominated allocation is now unused and will be removed
// later in PhaseMacroExpand::eliminate_allocate_node to not confuse loop opts.
igvn->record_for_igvn(alloc);
igvn->_worklist.push(alloc);
}
}
}
@@ -94,6 +94,8 @@ class InlineTypeBaseNode : public TypeNode {

// Allocate all non-flattened inline type fields
Node* allocate_fields(GraphKit* kit);

virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
};

//------------------------------InlineTypeNode-------------------------------------
@@ -635,7 +635,11 @@ Node* PhaseMacroExpand::inline_type_from_mem(Node* mem, Node* ctl, ciInlineKlass
value = value_from_mem(mem, ctl, bt, ft, adr_type, alloc);
if (value != NULL && ft->isa_narrowoop()) {
assert(UseCompressedOops, "unexpected narrow oop");
value = transform_later(new DecodeNNode(value, value->get_ptr_type()));
if (value->is_EncodeP()) {
value = value->in(1);
} else {
value = transform_later(new DecodeNNode(value, value->get_ptr_type()));
}
}
}
if (value != NULL) {
@@ -656,6 +660,7 @@ bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr
DEBUG_ONLY( Node* disq_node = NULL; )
bool can_eliminate = true;

Unique_Node_List worklist;
Node* res = alloc->result_cast();
const TypeOopPtr* res_type = NULL;
if (res == NULL) {
@@ -664,6 +669,7 @@ bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr
NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)
can_eliminate = false;
} else {
worklist.push(res);
res_type = _igvn.type(res)->isa_oopptr();
if (res_type == NULL) {
NOT_PRODUCT(fail_eliminate = "Neither instance or array allocation";)
@@ -677,9 +683,9 @@ bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr
}
}

if (can_eliminate && res != NULL) {
for (DUIterator_Fast jmax, j = res->fast_outs(jmax);
j < jmax && can_eliminate; j++) {
while (can_eliminate && worklist.size() > 0) {
res = worklist.pop();
for (DUIterator_Fast jmax, j = res->fast_outs(jmax); j < jmax && can_eliminate; j++) {
Node* use = res->fast_out(j);

if (use->is_AddP()) {
@@ -730,6 +736,9 @@ bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr
}
} else if (use->is_InlineType() && use->isa_InlineType()->get_oop() == res) {
// ok to eliminate
} else if (use->is_InlineTypePtr() && use->isa_InlineTypePtr()->get_oop() == res) {
// Process users
worklist.push(use);
} else if (use->Opcode() == Op_StoreX && use->in(MemNode::Address) == res) {
// Store to mark word of inline type larval buffer
assert(res_type->is_inlinetypeptr(), "Unexpected store to mark word");
@@ -765,7 +774,7 @@ bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr
alloc->dump();
else
res->dump();
} else if (alloc->_is_scalar_replaceable) {
} else {
tty->print("NotScalar (%s)", fail_eliminate);
if (res == NULL)
alloc->dump();
@@ -951,18 +960,19 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
#endif
return false;
}
if (field_val->is_InlineType()) {
// Keep track of inline types to scalarize them later
value_worklist.push(field_val);
} else if (UseCompressedOops && field_type->isa_narrowoop()) {
if (UseCompressedOops && field_type->isa_narrowoop()) {
// Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
// to be able scalar replace the allocation.
if (field_val->is_EncodeP()) {
field_val = field_val->in(1);
} else {
} else if (!field_val->is_InlineTypeBase()) {
field_val = transform_later(new DecodeNNode(field_val, field_val->get_ptr_type()));
}
}
if (field_val->is_InlineTypeBase()) {
// Keep track of inline types to scalarize them later
value_worklist.push(field_val);
}
sfpt->add_req(field_val);
}
JVMState *jvms = sfpt->jvms();
@@ -980,8 +990,8 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
// because Deoptimization::reassign_flat_array_elements needs field values.
bool allow_oop = (klass == NULL) || !klass->is_flat_array_klass();
for (uint i = 0; i < value_worklist.size(); ++i) {
Node* vt = value_worklist.at(i);
vt->as_InlineType()->make_scalar_in_safepoints(&_igvn, allow_oop);
InlineTypeBaseNode* vt = value_worklist.at(i)->as_InlineTypeBase();
vt->make_scalar_in_safepoints(&_igvn, allow_oop);
}
return true;
}
@@ -2257,7 +2267,8 @@ void PhaseMacroExpand::inline_type_guard(Node** ctrl, LockNode* lock) {

assert(unc->peek_monitor_box() == lock->box_node(), "wrong monitor");
assert((obj_type->is_inlinetypeptr() && unc->peek_monitor_obj()->is_SafePointScalarObject()) ||
(unc->peek_monitor_obj() == lock->obj_node()), "wrong monitor");
(obj->is_InlineTypePtr() && obj->in(1) == unc->peek_monitor_obj()) ||
(obj == unc->peek_monitor_obj()), "wrong monitor");

// pop monitor and push obj back on stack: we trap before the monitorenter
unc->pop_monitor();
@@ -3365,4 +3365,56 @@ public void test145_verifier(boolean warmup) {
EmptyContainer empty = (EmptyContainer)test145(array);
Asserts.assertEquals(empty, EmptyContainer.default);
}

// Test that non-flattened array does not block inline type scalarization
// (JDK-8261874): both 'vt' and the array holding it should be scalar
// replaced, so the compiled IR must contain no allocations, loops, loads
// or stores.
@Test(failOn = ALLOC + ALLOCA + LOOP + LOAD + STORE)
@Warmup(50000)
public void test146(boolean b) {
MyValue2 vt = MyValue2.createWithFieldsInline(rI, rD);
MyValue2[] array = { vt };
if (b) {
// Dead loop: 'array != array' is a self-comparison and always false, so
// 'array' is never cleared. Presumably this keeps the array reference
// alive through loop opts to exercise late scalarization — TODO confirm.
for (int i = 0; i < 10; ++i) {
if (array != array) {
array = null;
}
}
}
}

// Interpreted driver for test146; takes the branch that keeps the array
// alive. The 'warmup' flag is unused here — NOTE(review): appears to be
// required by the test framework's verifier signature convention.
@DontCompile
public void test146_verifier(boolean warmup) {
test146(true);
}

// Test that non-flattened array does not block inline type scalarization
// (JDK-8261874), including when the array is referenced by an uncommon trap.
@Test(failOn = ALLOC + ALLOCA + LOOP + LOAD + STORE)
@Warmup(50000)
public int test147(boolean deopt) {
// Both vt and array should be scalarized
MyValue2 vt = MyValue2.createWithFieldsInline(rI, rD);
MyValue2[] array = new MyValue2[1];

// Delay scalarization to after loop opts
boolean store = false;
for (int i = 0; i < 5; ++i) {
if (i == 1) {
store = true;
}
}
if (store) {
array[0] = vt;
}

if (deopt) {
// Uncommon trap referencing array
return array[0].x + 42;
}
return array[0].x;
}

// Interpreted driver for test147. During warmup it takes the normal path
// (expects vt.x); after warmup it triggers the deopt path (expects vt.x + 42),
// verifying that debug info reconstructed at the trap is correct.
@DontCompile
public void test147_verifier(boolean warmup) {
int res = test147(!warmup);
Asserts.assertEquals(res, MyValue2.createWithFieldsInline(rI, rD).x + (warmup ? 0 : 42));
}
}
@@ -23,9 +23,12 @@

/**
* @test
* @bug 8260034 8260225 8260283 8261037
* @bug 8260034 8260225 8260283 8261037 8261874
* @summary Generated inline type tests.
* @run main/othervm -Xbatch compiler.valhalla.inlinetypes.TestGenerated
* @run main/othervm -Xbatch
* compiler.valhalla.inlinetypes.TestGenerated
* @run main/othervm -Xbatch -XX:FlatArrayElementMaxSize=0
* compiler.valhalla.inlinetypes.TestGenerated
*/

package compiler.valhalla.inlinetypes;
@@ -122,6 +125,17 @@ boolean test8(MyValue1[] array) {
return array[0].array == array[0].array;
}

// Regression test for JDK-8261874: a non-flattened array holding an inline
// type must not block scalarization of that inline type.
void test9(boolean b) {
MyValue1[] array = { new MyValue1() };
if (b) {
// Dead loop: 'array != array' is a self-comparison and always false, so
// 'array' is never nulled; the loop only keeps the reference live.
for (int i = 0; i < 10; ++i) {
if (array != array) {
array = null;
}
}
}
}

public static void main(String[] args) {
TestGenerated t = new TestGenerated();
EmptyValue[] array1 = { new EmptyValue() };
@@ -137,6 +151,7 @@ public static void main(String[] args) {
t.test6();
t.test7(false);
t.test8(array3);
t.test9(true);
}
}
}

0 comments on commit e88ff83

Please sign in to comment.