Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

8246603: [lworld] C2 does not scalarize inline types wrapped into non-escaping box objects #71

Closed
wants to merge 3 commits into from
Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
@@ -874,14 +874,14 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
if (m->is_ValueType() && !t->isa_valuetype()) {
// Allocate value type in fast path
m = ValueTypePtrNode::make_from_value_type(&kit, m->as_ValueType());
m = m->as_ValueType()->buffer(&kit);
kit.map()->set_req(i, m);
}
if (n->is_ValueType() && !t->isa_valuetype()) {
// Allocate value type in slow path
PreserveJVMState pjvms(&kit);
kit.set_map(slow_map);
n = ValueTypePtrNode::make_from_value_type(&kit, n->as_ValueType());
n = n->as_ValueType()->buffer(&kit);
kit.map()->set_req(i, n);
slow_map = kit.stop();
}
@@ -1646,61 +1646,6 @@ void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
}
}

// Ideal transformation for AllocateNode: eliminate a value-type allocation
// whose result oop is never used (no surviving result cast), rewiring all of
// the call's projections to the allocation's own inputs so the allocation
// drops out of the graph. Returns NULL when the allocation was removed here;
// otherwise defers to CallNode::Ideal for the generic call transformations.
// NOTE(review): only runs during iterative GVN (can_reshape) — replace_node
// below requires a PhaseIterGVN; presumably can_reshape guarantees
// phase->is_IterGVN() != NULL, TODO confirm against caller contract.
Node* AllocateNode::Ideal(PhaseGVN* phase, bool can_reshape) {
// Check for unused value type allocation
// Candidate: a value-type allocation (ValueNode input present) that still has
// projection users (outcnt() != 0) but whose result oop is unused
// (result_cast() == NULL).
if (can_reshape && in(AllocateNode::ValueNode) != NULL &&
outcnt() != 0 && result_cast() == NULL) {
// Remove allocation by replacing the projection nodes with its inputs
InitializeNode* init = initialization();
PhaseIterGVN* igvn = phase->is_IterGVN();
// Gather this call's projections; separate_io_proj=true, do_asserts=false.
CallProjections* projs = extract_projections(true, false);
assert(projs->nb_resproj <= 1, "unexpected number of results");
// Fall-through (normal-path) projections are short-circuited to the
// allocation's corresponding inputs; exceptional (catchall) projections
// become top, since the allocation can no longer throw once removed.
if (projs->fallthrough_catchproj != NULL) {
igvn->replace_node(projs->fallthrough_catchproj, in(TypeFunc::Control));
}
if (projs->fallthrough_memproj != NULL) {
igvn->replace_node(projs->fallthrough_memproj, in(TypeFunc::Memory));
}
if (projs->catchall_memproj != NULL) {
igvn->replace_node(projs->catchall_memproj, phase->C->top());
}
if (projs->fallthrough_ioproj != NULL) {
igvn->replace_node(projs->fallthrough_ioproj, in(TypeFunc::I_O));
}
if (projs->catchall_ioproj != NULL) {
igvn->replace_node(projs->catchall_ioproj, phase->C->top());
}
if (projs->catchall_catchproj != NULL) {
igvn->replace_node(projs->catchall_catchproj, phase->C->top());
}
if (projs->resproj[0] != NULL) {
// Remove MemBarStoreStore user as well
// outcnt() == 2 means the membar has only its control/memory projection
// users left, i.e. it exists solely for this allocation's publication.
for (DUIterator_Fast imax, i = projs->resproj[0]->fast_outs(imax); i < imax; i++) {
MemBarStoreStoreNode* mb = projs->resproj[0]->fast_out(i)->isa_MemBarStoreStore();
if (mb != NULL && mb->outcnt() == 2) {
mb->remove(igvn);
// Compensate the fast-out iterator for the removed user.
--i; --imax;
}
}
igvn->replace_node(projs->resproj[0], phase->C->top());
}
// The allocation node itself is now dead.
igvn->replace_node(this, phase->C->top());
// Also bypass the InitializeNode's control/memory projections, if any,
// so the initialization barrier disappears along with the allocation.
if (init != NULL) {
Node* ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
Node* mem_proj = init->proj_out_or_null(TypeFunc::Memory);
if (ctrl_proj != NULL) {
igvn->replace_node(ctrl_proj, init->in(TypeFunc::Control));
}
if (mem_proj != NULL) {
igvn->replace_node(mem_proj, init->in(TypeFunc::Memory));
}
}
// Allocation eliminated; nothing further for GVN to do with this node.
return NULL;
}

// Not an eliminable value-type allocation: apply generic call idealization.
return CallNode::Ideal(phase, can_reshape);
}

Node* AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) {
Node* mark_node = NULL;
// For now only enable fast locking for non-array types
@@ -928,8 +928,6 @@ class AllocateNode : public CallNode {
virtual uint ideal_reg() const { return Op_RegP; }
virtual bool guaranteed_safepoint() { return false; }

virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);

// allocations do not modify their arguments
virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;}

@@ -2156,6 +2156,8 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// see if this phi should be sliced
uint merge_width = 0;
bool saw_self = false;
// TODO revisit this with JDK-8247216
bool mergemem_only = true;
for( uint i=1; i<req(); ++i ) {// For all paths in
Node *ii = in(i);
// TOP inputs should not be counted as safe inputs because if the
@@ -2168,11 +2170,13 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
MergeMemNode* n = ii->as_MergeMem();
merge_width = MAX2(merge_width, n->req());
saw_self = saw_self || phase->eqv(n->base_memory(), this);
} else {
mergemem_only = false;
}
}

// This restriction is temporarily necessary to ensure termination:
if (!saw_self && adr_type() == TypePtr::BOTTOM) merge_width = 0;
if (!mergemem_only && !saw_self && adr_type() == TypePtr::BOTTOM) merge_width = 0;

if (merge_width > Compile::AliasIdxRaw) {
// found at least one non-empty MergeMem
@@ -407,8 +407,11 @@ void Compile::remove_useless_nodes(Unique_Node_List &useful) {
}
}
// Remove useless value type nodes
if (_value_type_nodes != NULL) {
_value_type_nodes->remove_useless_nodes(useful.member_set());
for (int i = _value_type_nodes->length() - 1; i >= 0; i--) {
Node* vt = _value_type_nodes->at(i);
if (!useful.member(vt)) {
_value_type_nodes->remove(vt);
}
}
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
bs->eliminate_useless_gc_barriers(useful, this);
@@ -1020,7 +1023,7 @@ void Compile::Init(int aliaslevel) {
_expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
_range_check_casts = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
_opaque4_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
_value_type_nodes = new (comp_arena()) Unique_Node_List(comp_arena());
_value_type_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
register_library_intrinsics();
#ifdef ASSERT
_type_verify_symmetry = true;
@@ -1876,24 +1879,25 @@ void Compile::add_value_type(Node* n) {

void Compile::remove_value_type(Node* n) {
assert(n->is_ValueTypeBase(), "unexpected node");
if (_value_type_nodes != NULL) {
if (_value_type_nodes != NULL && _value_type_nodes->contains(n)) {
_value_type_nodes->remove(n);
}
}

// Does the return value keep otherwise useless value type allocations
// alive?
// Does the return value keep otherwise useless value type allocations alive?
static bool return_val_keeps_allocations_alive(Node* ret_val) {
ResourceMark rm;
Unique_Node_List wq;
wq.push(ret_val);
bool some_allocations = false;
for (uint i = 0; i < wq.size(); i++) {
Node* n = wq.at(i);
assert(!n->is_ValueTypeBase(), "chain of value type nodes");
assert(!n->is_ValueType(), "chain of value type nodes");
if (n->outcnt() > 1) {
// Some other use for the allocation
return false;
} else if (n->is_ValueTypePtr()) {
wq.push(n->in(1));
} else if (n->is_Phi()) {
for (uint j = 1; j < n->req(); j++) {
wq.push(n->in(j));
@@ -1907,18 +1911,24 @@ static bool return_val_keeps_allocations_alive(Node* ret_val) {
return some_allocations;
}

void Compile::process_value_types(PhaseIterGVN &igvn) {
void Compile::process_value_types(PhaseIterGVN &igvn, bool post_ea) {
// Make value types scalar in safepoints
while (_value_type_nodes->size() != 0) {
ValueTypeBaseNode* vt = _value_type_nodes->pop()->as_ValueTypeBase();
for (int i = _value_type_nodes->length()-1; i >= 0; i--) {
ValueTypeBaseNode* vt = _value_type_nodes->at(i)->as_ValueTypeBase();
vt->make_scalar_in_safepoints(&igvn);
if (vt->is_ValueTypePtr()) {
igvn.replace_node(vt, vt->get_oop());
} else if (vt->outcnt() == 0) {
igvn.remove_dead_node(vt);
}
// Remove ValueTypePtr nodes only after EA to give scalar replacement a chance
// to remove buffer allocations. ValueType nodes are kept until loop opts and
// removed via ValueTypeNode::remove_redundant_allocations.
if (post_ea) {
while (_value_type_nodes->length() > 0) {
ValueTypeBaseNode* vt = _value_type_nodes->pop()->as_ValueTypeBase();
if (vt->is_ValueTypePtr()) {
igvn.replace_node(vt, vt->get_oop());
}
}
}
_value_type_nodes = NULL;
// Make sure that the return value does not keep an unused allocation alive
if (tf()->returns_value_type_as_fields()) {
Node* ret = NULL;
for (uint i = 1; i < root()->req(); i++){
@@ -2487,7 +2497,7 @@ void Compile::Optimize() {
igvn.optimize();
}

if (_value_type_nodes->size() > 0) {
if (_value_type_nodes->length() > 0) {
// Do this once all inlining is over to avoid getting inconsistent debug info
process_value_types(igvn);
}
@@ -2526,6 +2536,11 @@ void Compile::Optimize() {
}
}

if (_value_type_nodes->length() > 0) {
// Process value types again now that EA might have simplified the graph
process_value_types(igvn, /* post_ea= */ true);
}

// Loop transforms on the ideal graph. Range Check Elimination,
// peeling, unrolling, etc.

@@ -318,7 +318,7 @@ class Compile : public Phase {
GrowableArray<Node*>* _expensive_nodes; // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
GrowableArray<Node*>* _range_check_casts; // List of CastII nodes with a range check dependency
GrowableArray<Node*>* _opaque4_nodes; // List of Opaque4 nodes that have a default value
Unique_Node_List* _value_type_nodes; // List of ValueType nodes
GrowableArray<Node*>* _value_type_nodes; // List of ValueType nodes
ConnectionGraph* _congraph;
#ifndef PRODUCT
IdealGraphPrinter* _printer;
@@ -714,8 +714,7 @@ class Compile : public Phase {
// Keep track of value type nodes for later processing
void add_value_type(Node* n);
void remove_value_type(Node* n);
void process_value_types(PhaseIterGVN &igvn);
bool can_add_value_type() const { return _value_type_nodes != NULL; }
void process_value_types(PhaseIterGVN &igvn, bool post_ea = false);

void adjust_flattened_array_access_aliases(PhaseIterGVN& igvn);

@@ -431,6 +431,7 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de
map_ideal_node(n, phantom_obj);
break;
}
case Op_ValueTypePtr:
case Op_CastPP:
case Op_CheckCastPP:
case Op_EncodeP:
@@ -602,6 +603,7 @@ void ConnectionGraph::add_final_edges(Node *n) {
add_base(n_ptn->as_Field(), ptn_base);
break;
}
case Op_ValueTypePtr:
case Op_CastPP:
case Op_CheckCastPP:
case Op_EncodeP:
@@ -3258,7 +3260,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
op == Op_SubTypeCheck || op == Op_ValueType ||
op == Op_SubTypeCheck || op == Op_ValueType || op == Op_ValueTypePtr ||
BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
n->dump();
use->dump();
@@ -1622,7 +1622,7 @@ Node* GraphKit::access_store_at(Node* obj,
// the store is re-executed if the allocation triggers deoptimization.
PreserveReexecuteState preexecs(this);
jvms()->set_should_reexecute(true);
val = val->as_ValueType()->allocate(this, safe_for_replace)->get_oop();
val = val->as_ValueType()->buffer(this, safe_for_replace);
}

C2AccessValuePtr addr(adr, adr_type);
@@ -1815,10 +1815,9 @@ void GraphKit::set_arguments_for_java_call(CallJavaNode* call, bool is_late_inli
continue;
} else if (arg->is_ValueType()) {
// Pass value type argument via oop to callee
if (is_late_inline) {
arg = ValueTypePtrNode::make_from_value_type(this, arg->as_ValueType());
} else {
arg = arg->as_ValueType()->allocate(this)->get_oop();
arg = arg->as_ValueType()->buffer(this);
if (!is_late_inline) {
arg = arg->as_ValueTypePtr()->get_oop();
}
}
call->init_req(idx++, arg);
@@ -4582,9 +4581,10 @@ Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
if (con_type != NULL) {
Node* con = makecon(con_type);
assert(!field->is_flattenable() || (field->is_static() && !con_type->is_zero_type()), "sanity");
if (field->layout_type() == T_VALUETYPE && field->type()->as_value_klass()->is_scalarizable()) {
// Check type of constant which might be more precise
if (con_type->is_valuetypeptr() && con_type->value_klass()->is_scalarizable()) {
// Load value type from constant oop
con = ValueTypeNode::make_from_oop(this, con, field->type()->as_value_klass());
con = ValueTypeNode::make_from_oop(this, con, con_type->value_klass());
}
return con;
}
@@ -143,7 +143,7 @@ class LibraryCallKit : public GraphKit {
// inline type. Make sure the call is re-executed if the allocation triggers a deoptimization.
PreserveReexecuteState preexecs(this);
jvms()->set_should_reexecute(true);
res = ValueTypePtrNode::make_from_value_type(this, res->as_ValueType());
res = res->as_ValueType()->buffer(this);
}
push_node(bt, res);
}
@@ -2522,8 +2522,7 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c
// Re-execute the unsafe access if allocation triggers deoptimization.
PreserveReexecuteState preexecs(this);
jvms()->set_should_reexecute(true);
vt = vt->allocate(this)->as_ValueType();
base = vt->get_oop();
base = vt->buffer(this)->get_oop();
}
}