Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
57 commits
Select commit Hold shift + click to select a range
9f220f4
8327012
eme64 Mar 13, 2024
5f8e5bf
test4
eme64 Mar 13, 2024
5041f64
initializing store capturing: test stub
eme64 Apr 24, 2024
dfa5da2
more tests, WIP
eme64 Apr 25, 2024
125f970
fix & extra tests
rwestrel Mar 19, 2025
71fd831
whitespace
rwestrel Apr 10, 2025
a4031f3
TestIterativeEA fix
rwestrel Apr 10, 2025
c32e453
Update test/hotspot/jtreg/compiler/macronodes/TestEliminationOfAlloca…
rwestrel Apr 24, 2025
082dcb5
Update src/hotspot/share/opto/multnode.hpp
rwestrel Apr 24, 2025
7139deb
Update src/hotspot/share/opto/escape.cpp
rwestrel Apr 24, 2025
469d8a5
Update src/hotspot/share/opto/multnode.hpp
rwestrel Apr 24, 2025
1639264
Update test/hotspot/jtreg/compiler/macronodes/TestInitializingStoreCa…
rwestrel Apr 24, 2025
377a8d7
Update test/hotspot/jtreg/compiler/macronodes/TestEliminationOfAlloca…
rwestrel Apr 24, 2025
a32b1d2
Update src/hotspot/share/opto/escape.cpp
rwestrel Apr 24, 2025
7afc47e
Update src/hotspot/share/opto/escape.cpp
rwestrel Apr 24, 2025
17517ae
review
rwestrel Apr 24, 2025
f22471c
review
rwestrel Apr 24, 2025
a401bed
review
rwestrel Apr 25, 2025
624d69e
review
rwestrel Apr 25, 2025
6134c97
review
rwestrel May 5, 2025
6a01123
more
rwestrel May 9, 2025
a76430b
more
rwestrel May 9, 2025
9f3b9cf
more
rwestrel May 12, 2025
02b43b7
more
rwestrel May 12, 2025
50e9437
more
rwestrel May 12, 2025
23035e0
more
rwestrel May 13, 2025
80d562a
more
rwestrel May 13, 2025
d5a2738
more
rwestrel May 14, 2025
af8480c
typo
rwestrel May 15, 2025
080638c
Merge branch 'master' into JDK-8327963
rwestrel May 15, 2025
616678f
Merge branch 'master' into JDK-8327963
rwestrel May 21, 2025
2a451c6
new test
rwestrel May 21, 2025
a6c6c04
new test tweak
rwestrel May 21, 2025
43c6f82
review
rwestrel May 22, 2025
c0a8ad2
Update src/hotspot/share/opto/library_call.cpp
rwestrel Jun 3, 2025
4b656f2
Merge branch 'master' into JDK-8327963
rwestrel Jun 3, 2025
24ff0e2
lambda clean up
rwestrel Jun 3, 2025
c189a7a
lambda return
rwestrel Jun 3, 2025
69c6e50
more
rwestrel Jun 4, 2025
3b5b54a
more
rwestrel Jun 5, 2025
ec9f278
Merge branch 'master' into JDK-8327963
rwestrel Aug 26, 2025
f528ce8
more
rwestrel Aug 27, 2025
fc13578
more
rwestrel Aug 28, 2025
b066f3c
Merge branch 'master' into JDK-8327963
rwestrel Sep 9, 2025
b701d03
more
rwestrel Sep 9, 2025
c64d68c
Update test/hotspot/jtreg/compiler/macronodes/TestEliminationOfAlloca…
rwestrel Sep 22, 2025
0ff5c45
Update src/hotspot/share/opto/multnode.hpp
rwestrel Sep 22, 2025
46972dc
Update src/hotspot/share/opto/multnode.hpp
rwestrel Sep 22, 2025
917ea91
Update src/hotspot/share/opto/graphKit.cpp
rwestrel Sep 22, 2025
2207487
Update src/hotspot/share/opto/graphKit.cpp
rwestrel Sep 22, 2025
f871390
Update src/hotspot/share/opto/macro.cpp
rwestrel Sep 22, 2025
6ea8c81
Update src/hotspot/share/opto/macro.cpp
rwestrel Sep 22, 2025
9fd8dc1
review
rwestrel Sep 22, 2025
f12efbc
Roberto's patches
rwestrel Sep 29, 2025
48257c9
review
rwestrel Sep 29, 2025
b5ac753
Merge branch 'master' into JDK-8327963
rwestrel Oct 28, 2025
957be06
review
rwestrel Oct 28, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions src/hotspot/share/opto/classes.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,7 @@ macro(NegL)
macro(NegD)
macro(NegF)
macro(NeverBranch)
macro(NarrowMemProj)
macro(OnSpinWait)
macro(Opaque1)
macro(OpaqueLoopInit)
Expand Down
52 changes: 41 additions & 11 deletions src/hotspot/share/opto/escape.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -863,7 +863,7 @@ Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr
// \|/
// Phi # "Field" Phi
//
void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist) {
void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, GrowableArray<Node*> &alloc_worklist) {
Node* ophi = curr_castpp->in(1);
assert(ophi->is_Phi(), "Expected this to be a Phi node.");

Expand Down Expand Up @@ -1279,7 +1279,7 @@ bool ConnectionGraph::reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, No
return true;
}

void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist) {
void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node*> &alloc_worklist) {
bool delay = _igvn->delay_transform();
_igvn->set_delay_transform(true);
_igvn->hash_delete(ophi);
Expand All @@ -1306,7 +1306,7 @@ void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_wo
// splitting CastPPs we make reference to the inputs of the Cmp that is used
// by the If controlling the CastPP.
for (uint i = 0; i < castpps.size(); i++) {
reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist, memnode_worklist);
reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist);
}

for (uint i = 0; i < others.size(); i++) {
Expand Down Expand Up @@ -4152,6 +4152,11 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
// which contains this memory slice, otherwise skip over it.
if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) {
result = proj_in->in(TypeFunc::Memory);
} else if (C->get_alias_index(result->adr_type()) != alias_idx) {
assert(C->get_general_index(alias_idx) == C->get_alias_index(result->adr_type()), "should be projection for the same field/array element");
result = get_map(result->_idx);
assert(result != nullptr, "new projection should have been allocated");
break;
}
} else if (proj_in->is_MemBar()) {
// Check if there is an array copy for a clone
Expand Down Expand Up @@ -4448,6 +4453,22 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
_compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
_compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
// Add a new NarrowMem projection for each existing NarrowMem projection with new adr type
InitializeNode* init = alloc->as_Allocate()->initialization();
assert(init != nullptr, "can't find Initialization node for this Allocate node");
auto process_narrow_proj = [&](NarrowMemProjNode* proj) {
const TypePtr* adr_type = proj->adr_type();
const TypePtr* new_adr_type = tinst->add_offset(adr_type->offset());
if (adr_type != new_adr_type && !init->already_has_narrow_mem_proj_with_adr_type(new_adr_type)) {
DEBUG_ONLY( uint alias_idx = _compile->get_alias_index(new_adr_type); )
assert(_compile->get_general_index(alias_idx) == _compile->get_alias_index(adr_type), "new adr type should be narrowed down from existing adr type");
NarrowMemProjNode* new_proj = new NarrowMemProjNode(init, new_adr_type);
igvn->set_type(new_proj, new_proj->bottom_type());
record_for_optimizer(new_proj);
set_map(proj, new_proj); // record it so ConnectionGraph::find_inst_mem() can find it
}
};
init->for_each_narrow_mem_proj_with_new_uses(process_narrow_proj);

// First, put on the worklist all Field edges from Connection Graph
// which is more accurate than putting immediate users from Ideal Graph.
Expand Down Expand Up @@ -4519,7 +4540,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
// finishes. For now we just try to split out the SR inputs of the merge.
Node* parent = n->in(1);
if (reducible_merges.member(n)) {
reduce_phi(n->as_Phi(), alloc_worklist, memnode_worklist);
reduce_phi(n->as_Phi(), alloc_worklist);
#ifdef ASSERT
if (VerifyReduceAllocationMerges) {
reduced_merges.push(n);
Expand Down Expand Up @@ -4711,11 +4732,13 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
}
if (n->is_Phi() || n->is_ClearArray()) {
// we don't need to do anything, but the users must be pushed
} else if (n->is_MemBar()) { // Initialize, MemBar nodes
// we don't need to do anything, but the users must be pushed
n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
if (n == nullptr) {
continue;
} else if (n->is_MemBar()) { // MemBar nodes
if (!n->is_Initialize()) { // memory projections for Initialize pushed below (so we get to all their uses)
// we don't need to do anything, but the users must be pushed
n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
if (n == nullptr) {
continue;
}
Comment on lines +4738 to +4741
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could we now have multiple NarrowMemProj? If so, what would happen here?

}
} else if (n->is_CallLeaf()) {
// Runtime calls with narrow memory input (no MergeMem node)
Expand All @@ -4732,6 +4755,8 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
// get the memory projection
n = n->find_out_with(Op_SCMemProj);
assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
} else if (n->is_Proj()) {
assert(n->in(0)->is_Initialize(), "we only push memory projections for Initialize");
} else {
#ifdef ASSERT
if (!n->is_Mem()) {
Expand Down Expand Up @@ -4775,6 +4800,11 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
memnode_worklist.append_if_missing(use);
}
} else if (use->is_Proj()) {
assert(n->is_Initialize(), "We only push projections of Initialize");
if (use->as_Proj()->_con == TypeFunc::Memory) { // Ignore precedent edge
memnode_worklist.append_if_missing(use);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do you know why we are using a GrowableArray here? Would a Unique_Node_List not serve us better, since we are always doing append_if_missing, which essentially has to scan the whole GrowableArray?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It's not clear to me. I filed: https://bugs.openjdk.org/browse/JDK-8358560 as a follow up.

}
#ifdef ASSERT
} else if(use->is_Mem()) {
assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
Expand Down Expand Up @@ -4826,7 +4856,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
// First, update mergemem by moving memory nodes to corresponding slices
// if their type became more precise since this mergemem was created.
while (mem->is_Mem()) {
const Type *at = igvn->type(mem->in(MemNode::Address));
const Type* at = igvn->type(mem->in(MemNode::Address));
if (at != Type::TOP) {
assert (at->isa_ptr() != nullptr, "pointer type required.");
uint idx = (uint)_compile->get_alias_index(at->is_ptr());
Expand Down Expand Up @@ -4946,7 +4976,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
record_for_optimizer(n);
} else {
assert(n->is_Allocate() || n->is_CheckCastPP() ||
n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
n->is_AddP() || n->is_Phi() || n->is_NarrowMemProj(), "unknown node used for set_map()");
}
}
#if 0 // ifdef ASSERT
Expand Down
8 changes: 5 additions & 3 deletions src/hotspot/share/opto/escape.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -563,8 +563,10 @@ class ConnectionGraph: public ArenaObj {
// Memory Phi - most recent unique Phi split out
// from this Phi
// MemNode - new memory input for this node
// ChecCastPP - allocation that this is a cast of
// CheckCastPP - allocation that this is a cast of
// allocation - CheckCastPP of the allocation
Comment on lines 563 to 567
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please add a new entry here explaining how _node_map is used for NarrowMemProjNode nodes.

// NarrowMem - newly created projection (type includes instance_id) from projection created
// before EA

// manage entries in _node_map

Expand Down Expand Up @@ -609,11 +611,11 @@ class ConnectionGraph: public ArenaObj {
bool can_reduce_phi_check_inputs(PhiNode* ophi) const;

void reduce_phi_on_field_access(Node* previous_addp, GrowableArray<Node *> &alloc_worklist);
void reduce_phi_on_castpp_field_load(Node* castpp, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist);
void reduce_phi_on_castpp_field_load(Node* castpp, GrowableArray<Node*> &alloc_worklist);
void reduce_phi_on_cmp(Node* cmp);
bool reduce_phi_on_safepoints(PhiNode* ophi);
bool reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, Node* selector, Unique_Node_List& safepoints);
void reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist);
void reduce_phi(PhiNode* ophi, GrowableArray<Node*> &alloc_worklist);

void set_not_scalar_replaceable(PointsToNode* ptn NOT_PRODUCT(COMMA const char* reason)) const {
#ifndef PRODUCT
Expand Down
15 changes: 9 additions & 6 deletions src/hotspot/share/opto/graphKit.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3641,14 +3641,17 @@ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
record_for_igvn(minit_in); // fold it up later, if possible
Node* minit_out = memory(rawidx);
assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
// Add an edge in the MergeMem for the header fields so an access
// to one of those has correct memory state
set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
int mark_idx = C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes()));
// Add an edge in the MergeMem for the header fields so an access to one of those has correct memory state.
// Use one NarrowMemProjNode per slice to properly record the adr type of each slice. The Initialize node will have
// multiple projections as a result.
set_memory(_gvn.transform(new NarrowMemProjNode(init, C->get_adr_type(mark_idx))), mark_idx);
int klass_idx = C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes()));
set_memory(_gvn.transform(new NarrowMemProjNode(init, C->get_adr_type(klass_idx))), klass_idx);
if (oop_type->isa_aryptr()) {
const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
int elemidx = C->get_alias_index(telemref);
hook_memory_on_init(*this, elemidx, minit_in, minit_out);
hook_memory_on_init(*this, elemidx, minit_in, _gvn.transform(new NarrowMemProjNode(init, C->get_adr_type(elemidx))));
} else if (oop_type->isa_instptr()) {
ciInstanceKlass* ik = oop_type->is_instptr()->instance_klass();
for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
Expand All @@ -3657,7 +3660,7 @@ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
continue; // do not bother to track really large numbers of fields
// Find (or create) the alias category for this field:
int fieldidx = C->alias_type(field)->index();
hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
hook_memory_on_init(*this, fieldidx, minit_in, _gvn.transform(new NarrowMemProjNode(init, C->get_adr_type(fieldidx))));
}
}
}
Expand Down
2 changes: 1 addition & 1 deletion src/hotspot/share/opto/idealGraphPrinter.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -589,7 +589,7 @@ void IdealGraphPrinter::visit_node(Node* n, bool edges) {
t->dump_on(&s2);
} else if( t == Type::MEMORY ) {
s2.print(" Memory:");
MemNode::dump_adr_type(node, node->adr_type(), &s2);
MemNode::dump_adr_type(node->adr_type(), &s2);
}

assert(s2.size() < sizeof(buffer), "size in range");
Expand Down
18 changes: 15 additions & 3 deletions src/hotspot/share/opto/library_call.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5544,7 +5544,7 @@ void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, No
InitializeNode* init = alloc->initialization();
Node* alloc_mem = alloc->in(TypeFunc::Memory);
C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
init->replace_mem_projs_by(alloc_mem, C);

// The CastIINode created in GraphKit::new_array (in AllocateArrayNode::make_ideal_length) must stay below
// the allocation (i.e. is only valid if the allocation succeeds):
Expand Down Expand Up @@ -5595,8 +5595,20 @@ void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, No
}
const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
int elemidx = C->get_alias_index(telemref);
set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
// Need to properly move every memory projection for the Initialize
#ifdef ASSERT
int mark_idx = C->get_alias_index(ary_type->add_offset(oopDesc::mark_offset_in_bytes()));
int klass_idx = C->get_alias_index(ary_type->add_offset(oopDesc::klass_offset_in_bytes()));
#endif
auto move_proj = [&](ProjNode* proj) {
int alias_idx = C->get_alias_index(proj->adr_type());
assert(alias_idx == Compile::AliasIdxRaw ||
alias_idx == elemidx ||
alias_idx == mark_idx ||
alias_idx == klass_idx, "should be raw memory or array element type");
set_memory(proj, alias_idx);
};
init->for_each_proj(move_proj, TypeFunc::Memory);

Node* allocx = _gvn.transform(alloc);
assert(allocx == alloc, "where has the allocation gone?");
Expand Down
4 changes: 3 additions & 1 deletion src/hotspot/share/opto/loopTransform.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4031,7 +4031,9 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
call->init_req(TypeFunc::I_O, C->top()); // Does no I/O.
call->init_req(TypeFunc::Memory, mem_phi->in(LoopNode::EntryControl));
call->init_req(TypeFunc::ReturnAdr, C->start()->proj_out_or_null(TypeFunc::ReturnAdr));
call->init_req(TypeFunc::FramePtr, C->start()->proj_out_or_null(TypeFunc::FramePtr));
Node* frame = new ParmNode(C->start(), TypeFunc::FramePtr);
_igvn.register_new_node_with_optimizer(frame);
call->init_req(TypeFunc::FramePtr, frame);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This seems unrelated. Is it needed?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It's one of the things mentioned in that comment:
#24570 (comment)

"I added asserts to catch cases where proj_out is called but the node has more than one matching projection. With those asserts, I caught some false positive/cases where we got lucky and worked around them by reworking the code so it doesn't use proj_out. That's the case in PhaseIdealLoop::intrinsify_fill(): we can end up there with more than one FramePtr projection because the code pattern used elsewhere is to add one more projection and let identical projections common during igvn. "

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are we just lucky that we don't have the same problem with ReturnAdr here?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, most likely.
This is also a pretty harmless corner case: if there is more than one Parm projection, the assert in proj_out catches it even though it does no harm to have more than one projection in this particular case. So this change is here, not to fix some broken code, but to make it possible to have a strict assert in proj_out.

_igvn.register_new_node_with_optimizer(call);
result_ctrl = new ProjNode(call,TypeFunc::Control);
_igvn.register_new_node_with_optimizer(result_ctrl);
Expand Down
5 changes: 3 additions & 2 deletions src/hotspot/share/opto/loopopts.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2678,8 +2678,9 @@ void PhaseIdealLoop::fix_ctrl_uses(const Node_List& body, const IdealLoopTree* l
if (head->is_strip_mined() && mode != IgnoreStripMined) {
CountedLoopNode* cl = head->as_CountedLoop();
CountedLoopEndNode* cle = cl->loopexit();
Node* cle_out = cle->proj_out_or_null(false);
if (use == cle_out) {
// is use the projection that exits the loop from the CountedLoopEndNode?
if (use->in(0) == cle) {
IfFalseNode* cle_out = use->as_IfFalse();
IfNode* le = cl->outer_loop_end();
use = le->proj_out(false);
use_loop = get_loop(use);
Expand Down
Loading