8255046: [lworld] JIT should make use of array layout encoding in markWord

Reviewed-by: roland
TobiHartmann committed Nov 13, 2020
1 parent a833bd8 commit 1b1aefaabd59b3dd98593c9af9b4a101f47f95d3
src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
@@ -1990,32 +1990,51 @@ void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
   // We are loading/storing from/to an array that *may* be flattened (the
   // declared type is Object[], abstract[], interface[] or VT.ref[]).
   // If this array is flattened, take the slow path.
-  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
   Register klass = op->tmp()->as_register();
-  __ load_klass(klass, op->array()->as_register(), tmp_load_klass);
-  __ movl(klass, Address(klass, Klass::layout_helper_offset()));
-  __ testl(klass, Klass::_lh_array_tag_vt_value_bit_inplace);
-  __ jcc(Assembler::notZero, *op->stub()->entry());
+  if (UseArrayMarkWordCheck) {
+    __ test_flattened_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
+  } else {
+    Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+    __ load_klass(klass, op->array()->as_register(), tmp_load_klass);
+    __ movl(klass, Address(klass, Klass::layout_helper_offset()));
+    __ testl(klass, Klass::_lh_array_tag_vt_value_bit_inplace);
+    __ jcc(Assembler::notZero, *op->stub()->entry());
+  }
   if (!op->value()->is_illegal()) {
     // The array is not flattened, but it might be null-free. If we are storing
     // a null into a null-free array, take the slow path (which will throw NPE).
     Label skip;
     __ cmpptr(op->value()->as_register(), (int32_t)NULL_WORD);
     __ jcc(Assembler::notEqual, skip);
-    __ testl(klass, Klass::_lh_null_free_bit_inplace);
-    __ jcc(Assembler::notZero, *op->stub()->entry());
+    if (UseArrayMarkWordCheck) {
+      __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
+    } else {
+      __ testl(klass, Klass::_lh_null_free_bit_inplace);
+      __ jcc(Assembler::notZero, *op->stub()->entry());
+    }
     __ bind(skip);
   }
 }

 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
   // We are storing into an array that *may* be null-free (the declared type is
   // Object[], abstract[], interface[] or VT.ref[]).
-  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
-  Register klass = op->tmp()->as_register();
-  __ load_klass(klass, op->array()->as_register(), tmp_load_klass);
-  __ movl(klass, Address(klass, Klass::layout_helper_offset()));
-  __ testl(klass, Klass::_lh_null_free_bit_inplace);
+  if (UseArrayMarkWordCheck) {
+    Label test_mark_word;
+    Register tmp = op->tmp()->as_register();
+    __ movptr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
+    __ testl(tmp, markWord::unlocked_value);
+    __ jccb(Assembler::notZero, test_mark_word);
+    __ load_prototype_header(tmp, op->array()->as_register(), rscratch1);
+    __ bind(test_mark_word);
+    __ testl(tmp, markWord::nullfree_array_bit_in_place);
+  } else {
+    Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+    Register klass = op->tmp()->as_register();
+    __ load_klass(klass, op->array()->as_register(), tmp_load_klass);
+    __ movl(klass, Address(klass, Klass::layout_helper_offset()));
+    __ testl(klass, Klass::_lh_null_free_bit_inplace);
+  }
 }

 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
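
Note: with UseArrayMarkWordCheck enabled, the two-load klass/layout-helper sequence is replaced by a single test against the array's mark word. The emit_opNullFreeArrayCheck hunk above shows the pattern inline for the null-free bit; below is a minimal sketch of the flattened-array variant that a helper like test_flattened_array_oop is expected to emit. The helper's real definition lives in the macro assembler, outside this hunk, and the bit constant name is assumed by analogy with markWord::nullfree_array_bit_in_place.

  // Sketch only: array/tmp are caller-supplied registers, is_flat is the slow-path label.
  Label test_mark_word;
  __ movptr(tmp, Address(array, oopDesc::mark_offset_in_bytes()));
  __ testl(tmp, markWord::unlocked_value);             // lock bits == 01 means unlocked
  __ jccb(Assembler::notZero, test_mark_word);         // unlocked: mark word holds the layout bits
  __ load_prototype_header(tmp, array, rscratch1);     // locked: mark is displaced, use the klass prototype header
  __ bind(test_mark_word);
  __ testl(tmp, markWord::flat_array_bit_in_place);    // assumed constant name, by analogy
  __ jcc(Assembler::notZero, is_flat);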
@@ -3252,17 +3271,24 @@ void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeS
     __ testptr(obj, obj);
     __ jcc(Assembler::zero, *slow_path->entry());
   }
-  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
-  __ load_klass(tmp, obj, tmp_load_klass);
-  __ movl(tmp, Address(tmp, Klass::layout_helper_offset()));
-  if (is_dest) {
-    // We also take slow path if it's a null_free destination array, just in case the source array
-    // contains NULLs.
-    __ testl(tmp, Klass::_lh_null_free_bit_inplace);
+  if (UseArrayMarkWordCheck) {
+    if (is_dest) {
+      __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
+    } else {
+      __ test_flattened_array_oop(obj, tmp, *slow_path->entry());
+    }
   } else {
-    __ testl(tmp, Klass::_lh_array_tag_vt_value_bit_inplace);
+    Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+    __ load_klass(tmp, obj, tmp_load_klass);
+    __ movl(tmp, Address(tmp, Klass::layout_helper_offset()));
+    if (is_dest) {
+      // Take the slow path if it's a null_free destination array, in case the source array contains NULLs.
+      __ testl(tmp, Klass::_lh_null_free_bit_inplace);
+    } else {
+      __ testl(tmp, Klass::_lh_array_tag_vt_value_bit_inplace);
+    }
+    __ jcc(Assembler::notZero, *slow_path->entry());
   }
-  __ jcc(Assembler::notZero, *slow_path->entry());
 }

src/hotspot/share/opto/cfgnode.cpp
@@ -849,7 +849,8 @@ bool RegionNode::optimize_trichotomy(PhaseIterGVN* igvn) {
       cmp2->Opcode() == Op_CmpF || cmp2->Opcode() == Op_CmpD ||
       cmp1->Opcode() == Op_CmpP || cmp1->Opcode() == Op_CmpN ||
       cmp2->Opcode() == Op_CmpP || cmp2->Opcode() == Op_CmpN ||
-      cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck()) {
+      cmp1->is_SubTypeCheck() || cmp2->is_SubTypeCheck() ||
+      cmp1->is_FlatArrayCheck() || cmp2->is_FlatArrayCheck()) {
     // Floats and pointers don't exactly obey trichotomy. To be on the safe side, don't transform their tests.
     // SubTypeCheck is not commutative
     return false;
src/hotspot/share/opto/cfgnode.hpp
@@ -401,7 +401,7 @@ class IfNode : public MultiBranchNode {
   // Returns NULL is it couldn't improve the type.
   static const TypeInt* filtered_int_type(PhaseGVN* phase, Node* val, Node* if_proj);

-  bool is_non_flattened_array_check(PhaseTransform* phase, Node** array = NULL);
+  bool is_flat_array_check(PhaseTransform* phase, Node** array = NULL);

 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const;
src/hotspot/share/opto/classes.hpp
@@ -161,6 +161,7 @@ macro(EncodeP)
 macro(EncodePKlass)
 macro(FastLock)
 macro(FastUnlock)
+macro(FlatArrayCheck)
 macro(FmaD)
 macro(FmaF)
 macro(Goto)
src/hotspot/share/opto/escape.cpp
@@ -1638,21 +1638,28 @@ int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_va
   if (init_val == phantom_obj) {
     // Do nothing for Allocate nodes since its fields values are
     // "known" unless they are initialized by arraycopy/clone.
-    if (alloc->is_Allocate() && !pta->arraycopy_dst())
-      return 0;
-    assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
+    if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
+      if (alloc->as_Allocate()->in(AllocateNode::DefaultValue) != NULL) {
+        // Non-flattened inline type arrays are initialized with
+        // the default value instead of null. Handle them here.
+        init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::DefaultValue)->_idx);
+        assert(init_val != NULL, "default value should be registered");
+      } else {
+        return 0;
+      }
+    }
+    // Non-escaped allocation returned from Java or runtime call has unknown values in fields.
+    assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
 #ifdef ASSERT
-    if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) {
+    if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == NULL) {
       const char* name = alloc->as_CallStaticJava()->_name;
       assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
     }
 #endif
-    // Non-escaped allocation returned from Java or runtime call have
-    // unknown values in fields.
     for (EdgeIterator i(pta); i.has_next(); i.next()) {
       PointsToNode* field = i.get();
       if (field->is_Field() && field->as_Field()->is_oop()) {
-        if (add_edge(field, phantom_obj)) {
+        if (add_edge(field, init_val)) {
           // New edge was added
           new_edges++;
           add_field_uses_to_worklist(field->as_Field());
@@ -1663,8 +1670,9 @@ int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_va
   }
   assert(init_val == null_obj, "sanity");
   // Do nothing for Call nodes since its fields values are unknown.
-  if (!alloc->is_Allocate())
+  if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::DefaultValue) != NULL) {
     return 0;
+  }

   InitializeNode* ini = alloc->as_Allocate()->initialization();
   bool visited_bottom_offset = false;
@@ -3496,7 +3504,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
                      op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
                      op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
-                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
+                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
           n->dump();
           use->dump();
           assert(false, "EA: missing memory path");
src/hotspot/share/opto/graphKit.cpp
@@ -3517,7 +3517,7 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, Node* *failure_contro
       if (region->req() == 3 && region->in(2) != NULL && region->in(2)->in(0) != NULL) {
         IfNode* iff = region->in(2)->in(0)->isa_If();
         if (iff != NULL) {
-          iff->is_non_flattened_array_check(&_gvn, &array);
+          iff->is_flat_array_check(&_gvn, &array);
         }
       }
     }
@@ -3548,8 +3548,8 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, Node* *failure_contro
 }

 Node* GraphKit::inline_type_test(Node* obj, bool is_inline) {
-  Node* mark_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
-  Node* mark = make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
+  Node* mark_adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
+  Node* mark = make_load(NULL, mark_adr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
   Node* mask = MakeConX(markWord::inline_type_pattern);
   Node* masked = _gvn.transform(new AndXNode(mark, mask));
   Node* cmp = _gvn.transform(new CmpXNode(masked, mask));
@@ -3566,8 +3566,12 @@ Node* GraphKit::array_lh_test(Node* klass, jint mask, jint val, bool eq) {
 }

 Node* GraphKit::flat_array_test(Node* ary, bool flat) {
-  Node* klass = load_object_klass(ary);
-  return array_lh_test(klass, Klass::_lh_array_tag_vt_value_bit_inplace, 0, !flat);
+  // We can't use immutable memory here because the mark word is mutable.
+  // PhaseIdealLoop::move_flat_array_check_out_of_loop will make sure the
+  // check is moved out of loops (mainly to enable loop unswitching).
+  Node* mem = UseArrayMarkWordCheck ? memory(Compile::AliasIdxRaw) : immutable_memory();
+  Node* cmp = _gvn.transform(new FlatArrayCheckNode(C, mem, ary));
+  return _gvn.transform(new BoolNode(cmp, flat ? BoolTest::eq : BoolTest::ne));
 }

 Node* GraphKit::null_free_array_test(Node* klass, bool null_free) {
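
Note: flat_array_test no longer inspects the klass layout helper; it emits a dedicated FlatArrayCheck node that reads the mark word through raw memory. The node class itself is added elsewhere in this commit; here is a rough sketch of the shape implied by its uses in this diff. The base class and body are assumptions; only the Array input slot name is taken from the FlatArrayCheckNode::Array references below.

  class FlatArrayCheckNode : public CmpNode {
  public:
    enum { Control, Memory, Array };  // raw memory input, then one or more array inputs
    FlatArrayCheckNode(Compile* C, Node* mem, Node* array) : CmpNode(mem, array) {
      init_class_id(Class_FlatArrayCheck);
    }
    virtual int Opcode() const;       // opcode registered via macro(FlatArrayCheck) above
  };

Like any comparison it is consumed through a BoolNode, with BoolTest::eq selecting the "is flat" result, as in flat_array_test above.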
src/hotspot/share/opto/ifnode.cpp
@@ -1181,51 +1181,21 @@ bool IfNode::is_null_check(ProjNode* proj, PhaseIterGVN* igvn) {
   return false;
 }

-// Returns true if this IfNode belongs to a non-flattened array check
+// Returns true if this IfNode belongs to a flat array check
 // and returns the corresponding array in the 'array' parameter.
-bool IfNode::is_non_flattened_array_check(PhaseTransform* phase, Node** array) {
+bool IfNode::is_flat_array_check(PhaseTransform* phase, Node** array) {
   Node* bol = in(1);
   if (!bol->is_Bool()) {
     return false;
   }
   Node* cmp = bol->in(1);
-  if (cmp->Opcode() != Op_CmpI) {
-    return false;
-  }
-  Node* cmp_in1 = cmp->in(1);
-  Node* cmp_in2 = cmp->in(2);
-  if (cmp_in2->find_int_con(-1) != 0) {
-    return false;
-  }
-  if (cmp_in1->Opcode() != Op_AndI) {
-    return false;
-  }
-  Node* and_in1 = cmp_in1->in(1);
-  Node* and_in2 = cmp_in1->in(2);
-  if (and_in2->find_int_con(0) != Klass::_lh_array_tag_vt_value_bit_inplace) {
-    return false;
-  }
-  if (and_in1->Opcode() != Op_LoadI) {
-    return false;
-  }
-  intptr_t offset;
-  Node* ptr = and_in1->in(MemNode::Address);
-  Node* addr = AddPNode::Ideal_base_and_offset(ptr, phase, offset);
-  if (addr == NULL || offset != in_bytes(Klass::layout_helper_offset())) {
-    return false;
-  }
-  if (!phase->type(addr)->isa_klassptr()) {
-    return false;
-  }
-  Node* klass_load = ptr->as_AddP()->in(AddPNode::Base)->uncast();
-  if (klass_load->is_DecodeNKlass()) {
-    klass_load = klass_load->in(1);
-  }
-  if (array != NULL && klass_load->is_Load()) {
-    Node* address = klass_load->in(MemNode::Address);
-    *array = address->as_AddP()->in(AddPNode::Base);
+  if (cmp->isa_FlatArrayCheck()) {
+    if (array != NULL) {
+      *array = cmp->in(FlatArrayCheckNode::Array);
+    }
+    return true;
   }
-  return true;
+  return false;
 }

 // Check that the If that is in between the 2 integer comparisons has
src/hotspot/share/opto/loopUnswitch.cpp
@@ -124,10 +124,10 @@ IfNode* PhaseIdealLoop::find_unswitching_candidate(const IdealLoopTree *loop, No

   // Collect all non-flattened array checks for unswitching to create a fast loop
   // without checks (only non-flattened array accesses) and a slow loop with checks.
-  if (unswitch_iff == NULL || unswitch_iff->is_non_flattened_array_check(&_igvn)) {
+  if (unswitch_iff == NULL || unswitch_iff->is_flat_array_check(&_igvn)) {
     for (uint i = 0; i < loop->_body.size(); i++) {
       IfNode* n = loop->_body.at(i)->isa_If();
-      if (n != NULL && n != unswitch_iff && n->is_non_flattened_array_check(&_igvn) &&
+      if (n != NULL && n != unswitch_iff && n->is_flat_array_check(&_igvn) &&
           loop->is_invariant(n->in(1)) && !loop->is_loop_exit(n)) {
         unswitch_iffs.push(n);
         if (unswitch_iff == NULL) {
@@ -231,26 +231,18 @@ void PhaseIdealLoop::do_unswitching(IdealLoopTree *loop, Node_List &old_new) {
     // can be removed from the fast loop (true proj) but not from the
     // slow loop (false proj) as it can have a mix of flattened/legacy accesses.
     assert(bol->_test._test == BoolTest::ne, "IfTrue proj must point to flat array");
-    // Adjust condition such that the true proj points to non-flat array
-    bol = new BoolNode(bol->in(1), bol->_test.negate());
-    register_new_node(bol, invar_iff->in(0));
-    Node* cmp = bol->in(1)->clone();
-    register_new_node(cmp, invar_iff->in(0));
+    bol = bol->clone()->as_Bool();
+    register_new_node(bol, invar_iff_c);
+    FlatArrayCheckNode* cmp = bol->in(1)->clone()->as_FlatArrayCheck();
+    register_new_node(cmp, invar_iff_c);
     bol->set_req(1, cmp);
     // Combine all checks into a single one that fails if one array is flattened
-    Node* lhs = NULL;
+    assert(cmp->req() == 3, "unexpected number of inputs for FlatArrayCheck");
+    cmp->add_req_batch(C->top(), unswitch_iffs.size() - 1);
     for (uint i = 0; i < unswitch_iffs.size(); i++) {
-      Node* lh = unswitch_iffs.at(i)->in(1)->in(1)->in(1)->in(1);
-      if (lhs == NULL) {
-        lhs = lh;
-      } else {
-        lhs = new OrINode(lhs, lh);
-        register_new_node(lhs, invar_iff->in(0));
-      }
+      Node* array = unswitch_iffs.at(i)->in(1)->in(1)->in(FlatArrayCheckNode::Array);
+      cmp->set_req(FlatArrayCheckNode::Array + i, array);
     }
-    Node* masked = new AndINode(lhs, _igvn.intcon(Klass::_lh_array_tag_vt_value_bit_inplace));
-    register_new_node(masked, invar_iff->in(0));
-    cmp->set_req(1, masked);
   }
   invar_iff->set_req(1, bol);

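Note: the guard construction above relies on FlatArrayCheck accepting a variable number of array inputs. Instead of OR-ing the loaded layout helpers of all checked arrays (the removed lhs/OrINode/AndINode chain), the cloned check is widened with add_req_batch and each invariant array is wired into a consecutive Array slot, so a single node guards the whole fast loop. A minimal sketch, assuming three hypothetical arrays a0, a1, a2 and mirroring the loop above:

  assert(cmp->req() == 3, "clone carries exactly one array input");
  cmp->add_req_batch(C->top(), 2);                  // grow from one to three array slots
  cmp->set_req(FlatArrayCheckNode::Array + 0, a0);
  cmp->set_req(FlatArrayCheckNode::Array + 1, a1);
  cmp->set_req(FlatArrayCheckNode::Array + 2, a2);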
@@ -267,7 +259,7 @@ void PhaseIdealLoop::do_unswitching(IdealLoopTree *loop, Node_List &old_new) {
         worklist.push(use);
       }
     }
-    ProjNode* invar_proj = invar_iff->proj_out(flat_array_checks ? (1-proj->_con) : proj->_con)->as_Proj();
+    ProjNode* invar_proj = invar_iff->proj_out(proj->_con)->as_Proj();
     while (worklist.size() > 0) {
       Node* use = worklist.pop();
       Node* nuse = use->clone();
@@ -287,13 +279,13 @@ void PhaseIdealLoop::do_unswitching(IdealLoopTree *loop, Node_List &old_new) {
   for (uint i = 0; i < unswitch_iffs.size(); i++) {
     IfNode* iff = unswitch_iffs.at(i)->as_If();
     _igvn.rehash_node_delayed(iff);
-    dominated_by(proj_true, iff, /* flip = */ flat_array_checks, false);
+    dominated_by(proj_true, iff);
   }
   IfNode* unswitch_iff_clone = old_new[unswitch_iff->_idx]->as_If();
   if (!flat_array_checks) {
     ProjNode* proj_false = invar_iff->proj_out(0)->as_Proj();
     _igvn.rehash_node_delayed(unswitch_iff_clone);
-    dominated_by(proj_false, unswitch_iff_clone, false, false);
+    dominated_by(proj_false, unswitch_iff_clone);
   } else {
     // Leave the flattened array checks in the slow loop and
     // prevent it from being unswitched again based on these checks.
src/hotspot/share/opto/loopnode.hpp
@@ -1375,6 +1375,7 @@ class PhaseIdealLoop : public PhaseTransform {
   Node *place_near_use( Node *useblock ) const;
   Node* try_move_store_before_loop(Node* n, Node *n_ctrl);
   void try_move_store_after_loop(Node* n);
+  void move_flat_array_check_out_of_loop(Node* n);
   bool identical_backtoback_ifs(Node *n);
   bool flatten_array_element_type_check(Node *n);
   bool can_split_if(Node *n_ctrl);
