-
Notifications
You must be signed in to change notification settings - Fork 6.1k
8327963: C2: fix construction of memory graph around Initialize node to prevent incorrect execution if allocation is removed #24570
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from all commits
9f220f4
5f8e5bf
5041f64
dfa5da2
125f970
71fd831
a4031f3
c32e453
082dcb5
7139deb
469d8a5
1639264
377a8d7
a32b1d2
7afc47e
17517ae
f22471c
a401bed
624d69e
6134c97
6a01123
a76430b
9f3b9cf
02b43b7
50e9437
23035e0
80d562a
d5a2738
af8480c
080638c
616678f
2a451c6
a6c6c04
43c6f82
c0a8ad2
4b656f2
24ff0e2
c189a7a
69c6e50
3b5b54a
ec9f278
f528ce8
fc13578
b066f3c
b701d03
c64d68c
0ff5c45
46972dc
917ea91
2207487
f871390
6ea8c81
9fd8dc1
f12efbc
48257c9
b5ac753
957be06
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -863,7 +863,7 @@ Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr | |
| // \|/ | ||
| // Phi # "Field" Phi | ||
| // | ||
| void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist) { | ||
| void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, GrowableArray<Node*> &alloc_worklist) { | ||
| Node* ophi = curr_castpp->in(1); | ||
| assert(ophi->is_Phi(), "Expected this to be a Phi node."); | ||
|
|
||
|
|
@@ -1279,7 +1279,7 @@ bool ConnectionGraph::reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, No | |
| return true; | ||
| } | ||
|
|
||
| void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist) { | ||
| void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node*> &alloc_worklist) { | ||
| bool delay = _igvn->delay_transform(); | ||
| _igvn->set_delay_transform(true); | ||
| _igvn->hash_delete(ophi); | ||
|
|
@@ -1306,7 +1306,7 @@ void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_wo | |
| // splitting CastPPs we make reference to the inputs of the Cmp that is used | ||
| // by the If controlling the CastPP. | ||
| for (uint i = 0; i < castpps.size(); i++) { | ||
| reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist, memnode_worklist); | ||
| reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist); | ||
| } | ||
|
|
||
| for (uint i = 0; i < others.size(); i++) { | ||
|
|
@@ -4152,6 +4152,11 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra | |
| // which contains this memory slice, otherwise skip over it. | ||
| if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) { | ||
| result = proj_in->in(TypeFunc::Memory); | ||
| } else if (C->get_alias_index(result->adr_type()) != alias_idx) { | ||
| assert(C->get_general_index(alias_idx) == C->get_alias_index(result->adr_type()), "should be projection for the same field/array element"); | ||
| result = get_map(result->_idx); | ||
| assert(result != nullptr, "new projection should have been allocated"); | ||
| break; | ||
| } | ||
| } else if (proj_in->is_MemBar()) { | ||
| // Check if there is an array copy for a clone | ||
|
|
@@ -4448,6 +4453,22 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, | |
| _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes())); | ||
| _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes())); | ||
| if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) { | ||
| // Add a new NarrowMem projection for each existing NarrowMem projection with new adr type | ||
| InitializeNode* init = alloc->as_Allocate()->initialization(); | ||
| assert(init != nullptr, "can't find Initialization node for this Allocate node"); | ||
| auto process_narrow_proj = [&](NarrowMemProjNode* proj) { | ||
| const TypePtr* adr_type = proj->adr_type(); | ||
| const TypePtr* new_adr_type = tinst->add_offset(adr_type->offset()); | ||
| if (adr_type != new_adr_type && !init->already_has_narrow_mem_proj_with_adr_type(new_adr_type)) { | ||
| DEBUG_ONLY( uint alias_idx = _compile->get_alias_index(new_adr_type); ) | ||
| assert(_compile->get_general_index(alias_idx) == _compile->get_alias_index(adr_type), "new adr type should be narrowed down from existing adr type"); | ||
| NarrowMemProjNode* new_proj = new NarrowMemProjNode(init, new_adr_type); | ||
| igvn->set_type(new_proj, new_proj->bottom_type()); | ||
| record_for_optimizer(new_proj); | ||
| set_map(proj, new_proj); // record it so ConnectionGraph::find_inst_mem() can find it | ||
| } | ||
| }; | ||
| init->for_each_narrow_mem_proj_with_new_uses(process_narrow_proj); | ||
|
|
||
| // First, put on the worklist all Field edges from Connection Graph | ||
| // which is more accurate than putting immediate users from Ideal Graph. | ||
|
|
@@ -4519,7 +4540,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, | |
| // finishes. For now we just try to split out the SR inputs of the merge. | ||
| Node* parent = n->in(1); | ||
| if (reducible_merges.member(n)) { | ||
| reduce_phi(n->as_Phi(), alloc_worklist, memnode_worklist); | ||
| reduce_phi(n->as_Phi(), alloc_worklist); | ||
| #ifdef ASSERT | ||
| if (VerifyReduceAllocationMerges) { | ||
| reduced_merges.push(n); | ||
|
|
@@ -4711,11 +4732,13 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, | |
| } | ||
| if (n->is_Phi() || n->is_ClearArray()) { | ||
| // we don't need to do anything, but the users must be pushed | ||
| } else if (n->is_MemBar()) { // Initialize, MemBar nodes | ||
| // we don't need to do anything, but the users must be pushed | ||
| n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory); | ||
| if (n == nullptr) { | ||
| continue; | ||
| } else if (n->is_MemBar()) { // MemBar nodes | ||
| if (!n->is_Initialize()) { // memory projections for Initialize pushed below (so we get to all their uses) | ||
| // we don't need to do anything, but the users must be pushed | ||
| n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory); | ||
| if (n == nullptr) { | ||
| continue; | ||
| } | ||
| } | ||
| } else if (n->is_CallLeaf()) { | ||
| // Runtime calls with narrow memory input (no MergeMem node) | ||
|
|
@@ -4732,6 +4755,8 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, | |
| // get the memory projection | ||
| n = n->find_out_with(Op_SCMemProj); | ||
| assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required"); | ||
| } else if (n->is_Proj()) { | ||
| assert(n->in(0)->is_Initialize(), "we only push memory projections for Initialize"); | ||
| } else { | ||
| #ifdef ASSERT | ||
| if (!n->is_Mem()) { | ||
|
|
@@ -4775,6 +4800,11 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, | |
| if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge | ||
| memnode_worklist.append_if_missing(use); | ||
| } | ||
| } else if (use->is_Proj()) { | ||
| assert(n->is_Initialize(), "We only push projections of Initialize"); | ||
| if (use->as_Proj()->_con == TypeFunc::Memory) { // Ignore precedent edge | ||
| memnode_worklist.append_if_missing(use); | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Do you know why we are using a … There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. It's not clear to me. I filed: https://bugs.openjdk.org/browse/JDK-8358560 as a follow up. |
||
| } | ||
| #ifdef ASSERT | ||
| } else if(use->is_Mem()) { | ||
| assert(use->in(MemNode::Memory) != n, "EA: missing memory path"); | ||
|
|
@@ -4826,7 +4856,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, | |
| // First, update mergemem by moving memory nodes to corresponding slices | ||
| // if their type became more precise since this mergemem was created. | ||
| while (mem->is_Mem()) { | ||
| const Type *at = igvn->type(mem->in(MemNode::Address)); | ||
| const Type* at = igvn->type(mem->in(MemNode::Address)); | ||
| if (at != Type::TOP) { | ||
| assert (at->isa_ptr() != nullptr, "pointer type required."); | ||
| uint idx = (uint)_compile->get_alias_index(at->is_ptr()); | ||
|
|
@@ -4946,7 +4976,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, | |
| record_for_optimizer(n); | ||
| } else { | ||
| assert(n->is_Allocate() || n->is_CheckCastPP() || | ||
| n->is_AddP() || n->is_Phi(), "unknown node used for set_map()"); | ||
| n->is_AddP() || n->is_Phi() || n->is_NarrowMemProj(), "unknown node used for set_map()"); | ||
| } | ||
| } | ||
| #if 0 // ifdef ASSERT | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -563,8 +563,10 @@ class ConnectionGraph: public ArenaObj { | |
| // Memory Phi - most recent unique Phi split out | ||
| // from this Phi | ||
| // MemNode - new memory input for this node | ||
| // ChecCastPP - allocation that this is a cast of | ||
| // CheckCastPP - allocation that this is a cast of | ||
| // allocation - CheckCastPP of the allocation | ||
|
Comment on lines
563
to
567
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Please add a new entry here explaining how |
||
| // NarrowMem - newly created projection (type includes instance_id) from projection created | ||
| // before EA | ||
|
|
||
| // manage entries in _node_map | ||
|
|
||
|
|
@@ -609,11 +611,11 @@ class ConnectionGraph: public ArenaObj { | |
| bool can_reduce_phi_check_inputs(PhiNode* ophi) const; | ||
|
|
||
| void reduce_phi_on_field_access(Node* previous_addp, GrowableArray<Node *> &alloc_worklist); | ||
| void reduce_phi_on_castpp_field_load(Node* castpp, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist); | ||
| void reduce_phi_on_castpp_field_load(Node* castpp, GrowableArray<Node*> &alloc_worklist); | ||
| void reduce_phi_on_cmp(Node* cmp); | ||
| bool reduce_phi_on_safepoints(PhiNode* ophi); | ||
| bool reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, Node* selector, Unique_Node_List& safepoints); | ||
| void reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist); | ||
| void reduce_phi(PhiNode* ophi, GrowableArray<Node*> &alloc_worklist); | ||
|
|
||
| void set_not_scalar_replaceable(PointsToNode* ptn NOT_PRODUCT(COMMA const char* reason)) const { | ||
| #ifndef PRODUCT | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -4031,7 +4031,9 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) { | |
| call->init_req(TypeFunc::I_O, C->top()); // Does no I/O. | ||
| call->init_req(TypeFunc::Memory, mem_phi->in(LoopNode::EntryControl)); | ||
| call->init_req(TypeFunc::ReturnAdr, C->start()->proj_out_or_null(TypeFunc::ReturnAdr)); | ||
| call->init_req(TypeFunc::FramePtr, C->start()->proj_out_or_null(TypeFunc::FramePtr)); | ||
| Node* frame = new ParmNode(C->start(), TypeFunc::FramePtr); | ||
| _igvn.register_new_node_with_optimizer(frame); | ||
| call->init_req(TypeFunc::FramePtr, frame); | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This seems unrelated. Is it needed? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. It's one of the things mentioned in that comment: "I added asserts to catch cases where proj_out is called but the node has more than one matching projection. With those asserts, I caught some false positives/cases where we got lucky, and worked around them by reworking the code so it doesn't use proj_out. That's the case in PhaseIdealLoop::intrinsify_fill(): we can end up there with more than one FramePtr projection because the code pattern used elsewhere is to add one more projection and let identical projections common during igvn." There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Are we just lucky that we don't have the same problem with ReturnAdr here? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Yes, most likely. |
||
| _igvn.register_new_node_with_optimizer(call); | ||
| result_ctrl = new ProjNode(call,TypeFunc::Control); | ||
| _igvn.register_new_node_with_optimizer(result_ctrl); | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Could we now have multiple
NarrowMemProj? If so, what would happen here?