2 changes: 1 addition & 1 deletion src/hotspot/share/opto/callGenerator.cpp
@@ -470,7 +470,7 @@ class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
virtual void do_late_inline();

virtual void set_callee_method(ciMethod* m) {
assert(_callee == nullptr || _callee == m, "repeated inline attempt with different callee");
assert(_callee == nullptr, "repeated inlining attempt");
_callee = m;
}

119 changes: 47 additions & 72 deletions src/hotspot/share/opto/callnode.cpp
@@ -1091,15 +1091,6 @@ void CallJavaNode::dump_compact_spec(outputStream* st) const {
}
#endif

void CallJavaNode::register_for_late_inline() {
if (generator() != nullptr) {
Compile::current()->prepend_late_inline(generator());
set_generator(nullptr);
} else {
assert(false, "repeated inline attempt");
}
}

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
bool CallStaticJavaNode::cmp( const Node &n ) const {
@@ -1110,35 +1101,26 @@ bool CallStaticJavaNode::cmp( const Node &n ) const {
Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
CallGenerator* cg = generator();
if (can_reshape && cg != nullptr) {
if (cg->is_mh_late_inline()) {
assert(IncrementalInlineMH, "required");
assert(cg->call_node() == this, "mismatch");
assert(cg->method()->is_method_handle_intrinsic(), "required");

// Check whether this MH handle call becomes a candidate for inlining.
ciMethod* callee = cg->method();
vmIntrinsics::ID iid = callee->intrinsic_id();
if (iid == vmIntrinsics::_invokeBasic) {
if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
register_for_late_inline();
}
} else if (iid == vmIntrinsics::_linkToNative) {
// never retry
} else {
assert(callee->has_member_arg(), "wrong type of call?");
if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
register_for_late_inline();
phase->C->inc_number_of_mh_late_inlines();
}
assert(IncrementalInlineMH, "required");
assert(cg->call_node() == this, "mismatch");
assert(cg->is_mh_late_inline(), "not virtual");

// Check whether this MH handle call becomes a candidate for inlining.
ciMethod* callee = cg->method();
vmIntrinsics::ID iid = callee->intrinsic_id();
if (iid == vmIntrinsics::_invokeBasic) {
if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
phase->C->prepend_late_inline(cg);
set_generator(nullptr);
}
} else if (iid == vmIntrinsics::_linkToNative) {
// never retry
} else {
assert(IncrementalInline, "required");
assert(!cg->method()->is_method_handle_intrinsic(), "required");
if (phase->C->print_inlining()) {
phase->C->inline_printer()->record(cg->method(), cg->call_node()->jvms(), InliningResult::FAILURE,
"static call node changed: trying again");
assert(callee->has_member_arg(), "wrong type of call?");
if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
phase->C->prepend_late_inline(cg);
set_generator(nullptr);
}
register_for_late_inline();
}
}
return CallNode::Ideal(phase, can_reshape);
@@ -1207,46 +1189,39 @@ bool CallDynamicJavaNode::cmp( const Node &n ) const {
Node* CallDynamicJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
CallGenerator* cg = generator();
if (can_reshape && cg != nullptr) {
if (cg->is_virtual_late_inline()) {
assert(IncrementalInlineVirtual, "required");
assert(cg->call_node() == this, "mismatch");

// Recover symbolic info for method resolution.
ciMethod* caller = jvms()->method();
ciBytecodeStream iter(caller);
iter.force_bci(jvms()->bci());

bool not_used1;
ciSignature* not_used2;
ciMethod* orig_callee = iter.get_method(not_used1, &not_used2); // callee in the bytecode
ciKlass* holder = iter.get_declared_method_holder();
if (orig_callee->is_method_handle_intrinsic()) {
assert(_override_symbolic_info, "required");
orig_callee = method();
holder = method()->holder();
}
assert(IncrementalInlineVirtual, "required");
assert(cg->call_node() == this, "mismatch");
assert(cg->is_virtual_late_inline(), "not virtual");

// Recover symbolic info for method resolution.
ciMethod* caller = jvms()->method();
ciBytecodeStream iter(caller);
iter.force_bci(jvms()->bci());

bool not_used1;
ciSignature* not_used2;
ciMethod* orig_callee = iter.get_method(not_used1, &not_used2); // callee in the bytecode
ciKlass* holder = iter.get_declared_method_holder();
if (orig_callee->is_method_handle_intrinsic()) {
assert(_override_symbolic_info, "required");
orig_callee = method();
holder = method()->holder();
}

ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

Node* receiver_node = in(TypeFunc::Parms);
const TypeOopPtr* receiver_type = phase->type(receiver_node)->isa_oopptr();
Node* receiver_node = in(TypeFunc::Parms);
const TypeOopPtr* receiver_type = phase->type(receiver_node)->isa_oopptr();

int not_used3;
bool call_does_dispatch;
ciMethod* callee = phase->C->optimize_virtual_call(caller, klass, holder, orig_callee, receiver_type, true /*is_virtual*/,
call_does_dispatch, not_used3); // out-parameters
if (!call_does_dispatch) {
// Register for late inlining.
cg->set_callee_method(callee);
register_for_late_inline(); // MH late inlining prepends to the list, so do the same
}
} else {
assert(IncrementalInline, "required");
if (phase->C->print_inlining()) {
phase->C->inline_printer()->record(cg->method(), cg->call_node()->jvms(), InliningResult::FAILURE,
"dynamic call node changed: trying again");
}
register_for_late_inline();
int not_used3;
bool call_does_dispatch;
ciMethod* callee = phase->C->optimize_virtual_call(caller, klass, holder, orig_callee, receiver_type, true /*is_virtual*/,
call_does_dispatch, not_used3); // out-parameters
if (!call_does_dispatch) {
// Register for late inlining.
cg->set_callee_method(callee);
phase->C->prepend_late_inline(cg); // MH late inlining prepends to the list, so do the same
set_generator(nullptr);
}
}
return CallNode::Ideal(phase, can_reshape);
1 change: 0 additions & 1 deletion src/hotspot/share/opto/callnode.hpp
@@ -790,7 +790,6 @@ class CallJavaNode : public CallNode {
void set_arg_escape(bool f) { _arg_escape = f; }
bool arg_escape() const { return _arg_escape; }
void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
void register_for_late_inline();

DEBUG_ONLY( bool validate_symbolic_info() const; )

11 changes: 0 additions & 11 deletions src/hotspot/share/opto/compile.cpp
@@ -2085,7 +2085,6 @@ bool Compile::inline_incrementally_one() {
for (int i = 0; i < _late_inlines.length(); i++) {
_late_inlines_pos = i+1;
CallGenerator* cg = _late_inlines.at(i);
bool is_scheduled_for_igvn_before = C->igvn_worklist()->member(cg->call_node());
bool does_dispatch = cg->is_virtual_late_inline() || cg->is_mh_late_inline();
if (inlining_incrementally() || does_dispatch) { // a call can be either inlined or strength-reduced to a direct call
cg->do_late_inline();
@@ -2096,16 +2095,6 @@
_late_inlines_pos = i+1; // restore the position in case new elements were inserted
print_method(PHASE_INCREMENTAL_INLINE_STEP, 3, cg->call_node());
break; // process one call site at a time
} else {
bool is_scheduled_for_igvn_after = C->igvn_worklist()->member(cg->call_node());
if (!is_scheduled_for_igvn_before && is_scheduled_for_igvn_after) {
// Avoid potential infinite loop if node already in the IGVN list
assert(false, "scheduled for IGVN during inlining attempt");
} else {
// Ensure call node has not disappeared from IGVN worklist during a failed inlining attempt
assert(!is_scheduled_for_igvn_before || is_scheduled_for_igvn_after, "call node removed from IGVN list during inlining pass");
cg->call_node()->set_generator(cg);
}
}
} else {
// Ignore late inline direct calls when inlining is not allowed.
1 change: 1 addition & 0 deletions test/hotspot/jtreg/ProblemList.txt
@@ -56,6 +56,7 @@ compiler/c2/irTests/TestDuplicateBackedge.java 8318904 generic-all
compiler/codecache/jmx/PoolsIndependenceTest.java 8264632 macosx-all

compiler/vectorapi/reshape/TestVectorReinterpret.java 8320897,8348519 aix-ppc64,linux-ppc64le,linux-s390x
compiler/vectorapi/VectorLogicalOpIdentityTest.java 8302459 linux-x64,windows-x64
compiler/vectorapi/VectorRebracket128Test.java 8330538 generic-all

compiler/jvmci/TestUncaughtErrorInCompileMethod.java 8309073 generic-all
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@

/**
* @test
* @bug 8325520 8302459
* @bug 8325520
* @library /test/lib /
* @summary Don't allow folding of Load/Store vectors when using incompatible indices or masks
* @modules jdk.incubator.vector
@@ -1398,12 +1398,7 @@ public static void testFloatVectorLoadMaskedStoreVector() {
public static void main(String[] args) {
TestFramework testFramework = new TestFramework();
testFramework.setDefaultWarmup(10000)
.addFlags("--add-modules=jdk.incubator.vector")
.addFlags("--add-modules=jdk.incubator.vector", "-XX:+IgnoreUnrecognizedVMOptions", "-XX:+IncrementalInlineForceCleanup")
.start();
testFramework = new TestFramework();
testFramework.setDefaultWarmup(10000)
.addFlags("--add-modules=jdk.incubator.vector", "-XX:-TieredCompilation")
.start();

}
}
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2022, 2023, Arm Limited. All rights reserved.
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@

/**
* @test
* @bug 8288294 8302459
* @bug 8288294
* @key randomness
* @library /test/lib /
* @summary Add identity transformations for vector logic operations
@@ -761,6 +761,5 @@ public static void testMaskXorSame() {

public static void main(String[] args) {
TestFramework.runWithFlags("--add-modules=jdk.incubator.vector");
TestFramework.runWithFlags("--add-modules=jdk.incubator.vector", "-XX:-TieredCompilation");
}
}