Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 3 additions & 4 deletions src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -184,7 +184,6 @@ Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) con
DecoratorSet decorators = access.decorators();

Node* adr = access.addr().node();
const TypePtr* adr_type = access.addr().type();

bool mismatched = (decorators & C2_MISMATCHED) != 0;
bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
Expand All @@ -207,7 +206,7 @@ Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) con
Compile* C = Compile::current();
Node* mem = kit->immutable_memory();
load = LoadNode::make(kit->gvn(), control, mem, adr,
adr_type, val_type, access.type(), mo, dep, requires_atomic_access,
val_type, access.type(), mo, dep, requires_atomic_access,
unaligned, mismatched, unsafe, access.barrier_data());
load = kit->gvn().transform(load);
} else {
Expand All @@ -221,8 +220,8 @@ Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) con
Node* control = control_dependent ? opt_access.ctl() : nullptr;
MergeMemNode* mm = opt_access.mem();
PhaseGVN& gvn = opt_access.gvn();
Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));
load = LoadNode::make(gvn, control, mem, adr, adr_type, val_type, access.type(), mo, dep,
Node* mem = mm->memory_at(gvn.C->get_alias_index(access.addr().type()));
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can we get rid of all uses of access.addr().type()?

load = LoadNode::make(gvn, control, mem, adr, val_type, access.type(), mo, dep,
requires_atomic_access, unaligned, mismatched, unsafe, access.barrier_data());
load = gvn.transform(load);
}
Expand Down
7 changes: 2 additions & 5 deletions src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -91,9 +91,6 @@ void CardTableBarrierSetC2::post_barrier(GraphKit* kit,
// Combine card table base and card offset
Node* card_adr = __ AddP(__ top(), byte_map_base_node(kit), card_offset);

// Get the alias_index for raw card-mark memory
int adr_type = Compile::AliasIdxRaw;

// Dirty card value to store
Node* dirty = __ ConI(CardTable::dirty_card_val());

Expand All @@ -105,12 +102,12 @@ void CardTableBarrierSetC2::post_barrier(GraphKit* kit,
// UseCondCardMark enables MP "polite" conditional card mark
// stores. In theory we could relax the load from ctrl() to
// no_ctrl, but that doesn't buy much latitude.
Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We could assert that C->get_alias_index(kit->type(card_adr)) == Compile::AliasIdxRaw, that is, that the computed slice is the same as the hardcoded slice. Similar asserts could be added for every location where a slice/address type is removed in this patch.

__ if_then(card_val, BoolTest::ne, dirty);
}

// Smash dirty value into card
__ store(__ ctrl(), card_adr, dirty, T_BYTE, adr_type, MemNode::unordered);
__ store(__ ctrl(), card_adr, dirty, T_BYTE, MemNode::unordered);

if (UseCondCardMark) {
__ end_if();
Expand Down
18 changes: 8 additions & 10 deletions src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -215,15 +215,15 @@ void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
// Now some of the values
Node* marking;
Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE);
marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING));
assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape");

// if (!marking)
__ if_then(marking, BoolTest::ne, zero, unlikely); {
BasicType index_bt = TypeX_X->basic_type();
assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt);

if (do_load) {
// load original value
Expand All @@ -233,7 +233,7 @@ void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,

// if (pre_val != nullptr)
__ if_then(pre_val, BoolTest::ne, kit->null()); {
Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS);

// is the queue for this thread full?
__ if_then(index, BoolTest::ne, zeroX, likely); {
Expand All @@ -243,9 +243,9 @@ void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,

// Now get the buffer location we will log the previous value into and store it
Node *log_addr = __ AddP(no_base, buffer, next_index);
__ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
__ store(__ ctrl(), log_addr, pre_val, T_OBJECT, MemNode::unordered);
// update the index
__ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);
__ store(__ ctrl(), index_adr, next_index, index_bt, MemNode::unordered);

} __ else_(); {

Expand Down Expand Up @@ -483,16 +483,14 @@ void ShenandoahBarrierSetC2::post_barrier(GraphKit* kit,

Node* curr_ct_holder_offset = __ ConX(in_bytes(ShenandoahThreadLocalData::card_table_offset()));
Node* curr_ct_holder_addr = __ AddP(__ top(), tls, curr_ct_holder_offset);
Node* curr_ct_base_addr = __ load( __ ctrl(), curr_ct_holder_addr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
Node* curr_ct_base_addr = __ load( __ ctrl(), curr_ct_holder_addr, TypeRawPtr::NOTNULL, T_ADDRESS);

// Divide by card size
Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift()) );

// Combine card table base and card offset
Node* card_adr = __ AddP(__ top(), curr_ct_base_addr, card_offset);

// Get the alias_index for raw card-mark memory
int adr_type = Compile::AliasIdxRaw;
Node* zero = __ ConI(0); // Dirty card value

if (UseCondCardMark) {
Expand All @@ -503,12 +501,12 @@ void ShenandoahBarrierSetC2::post_barrier(GraphKit* kit,
// UseCondCardMark enables MP "polite" conditional card mark
// stores. In theory we could relax the load from ctrl() to
// no_ctrl, but that doesn't buy much latitude.
Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE);
__ if_then(card_val, BoolTest::ne, zero);
}

// Smash zero into card
__ store(__ ctrl(), card_adr, zero, T_BYTE, adr_type, MemNode::unordered);
__ store(__ ctrl(), card_adr, zero, T_BYTE, MemNode::unordered);

if (UseCondCardMark) {
__ end_if();
Expand Down
3 changes: 2 additions & 1 deletion src/hotspot/share/opto/callnode.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1661,12 +1661,13 @@ void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
_is_allocation_MemBar_redundant = true;
}
}

Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) {
Node* mark_node = nullptr;
if (UseCompactObjectHeaders) {
Node* klass_node = in(AllocateNode::KlassNode);
Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
} else {
// For now only enable fast locking for non-array types
mark_node = phase->MakeConX(markWord::prototype().value());
Expand Down
8 changes: 3 additions & 5 deletions src/hotspot/share/opto/graphKit.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1576,7 +1576,7 @@ Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
const TypePtr* adr_type = nullptr; // debug-mode-only argument
DEBUG_ONLY(adr_type = C->get_adr_type(adr_idx));
Node* mem = memory(adr_idx);
Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
Node* ld = LoadNode::make(_gvn, ctl, mem, adr, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
ld = _gvn.transform(ld);
if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
// Improve graph before escape analysis and boxing elimination.
Expand Down Expand Up @@ -2979,8 +2979,7 @@ void GraphKit::guard_klass_being_initialized(Node* klass) {
int init_state_off = in_bytes(InstanceKlass::init_state_offset());
Node* adr = basic_plus_adr(top(), klass, init_state_off);
Node* init_state = LoadNode::make(_gvn, nullptr, immutable_memory(), adr,
adr->bottom_type()->is_ptr(), TypeInt::BYTE,
T_BYTE, MemNode::acquire);
TypeInt::BYTE, T_BYTE, MemNode::acquire);
init_state = _gvn.transform(init_state);

Node* being_initialized_state = makecon(TypeInt::make(InstanceKlass::being_initialized));
Expand All @@ -2998,8 +2997,7 @@ void GraphKit::guard_init_thread(Node* klass) {
Node* adr = basic_plus_adr(top(), klass, init_thread_off);

Node* init_thread = LoadNode::make(_gvn, nullptr, immutable_memory(), adr,
adr->bottom_type()->is_ptr(), TypePtr::NOTNULL,
T_ADDRESS, MemNode::unordered);
TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);
init_thread = _gvn.transform(init_thread);

Node* cur_thread = _gvn.transform(new ThreadLocalNode());
Expand Down
7 changes: 3 additions & 4 deletions src/hotspot/share/opto/idealKit.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -347,23 +347,22 @@ Node* IdealKit::load(Node* ctl,
Node* adr,
const Type* t,
BasicType bt,
int adr_idx,
bool require_atomic_access,
MemNode::MemOrd mo,
LoadNode::ControlDependency control_dependency) {

int adr_idx = C->get_alias_index(_gvn.type(adr)->isa_ptr());
assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
const TypePtr* adr_type = nullptr; // debug-mode-only argument
DEBUG_ONLY(adr_type = C->get_adr_type(adr_idx));
Node* mem = memory(adr_idx);
Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access);
Node* ld = LoadNode::make(_gvn, ctl, mem, adr, t, bt, mo, control_dependency, require_atomic_access);
return transform(ld);
}

Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt,
int adr_idx,
MemNode::MemOrd mo, bool require_atomic_access,
bool mismatched) {
int adr_idx = C->get_alias_index(_gvn.type(adr)->isa_ptr());
assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
const TypePtr* adr_type = nullptr;
DEBUG_ONLY(adr_type = C->get_adr_type(adr_idx));
Expand Down
2 changes: 0 additions & 2 deletions src/hotspot/share/opto/idealKit.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -219,7 +219,6 @@ class IdealKit: public StackObj {
Node* adr,
const Type* t,
BasicType bt,
int adr_idx,
bool require_atomic_access = false,
MemNode::MemOrd mo = MemNode::unordered,
LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest);
Expand All @@ -229,7 +228,6 @@ class IdealKit: public StackObj {
Node* adr,
Node* val,
BasicType bt,
int adr_idx,
MemNode::MemOrd mo,
bool require_atomic_access = false,
bool mismatched = false);
Expand Down
14 changes: 7 additions & 7 deletions src/hotspot/share/opto/library_call.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -974,7 +974,7 @@ Node* LibraryCallKit::current_thread_helper(Node*& tls_output, ByteSize handle_o

Node* thread_obj_handle
= (is_immutable
? LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(),
? LoadNode::make(_gvn, nullptr, immutable_memory(), p,
TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered)
: make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered));
thread_obj_handle = _gvn.transform(thread_obj_handle);
Expand Down Expand Up @@ -3005,7 +3005,7 @@ bool LibraryCallKit::inline_native_notify_jvmti_funcs(address funcAddr, const ch
Node* ONE = ideal.ConI(1);
Node* hide = is_start ? ideal.ConI(0) : (is_end ? ideal.ConI(1) : _gvn.transform(argument(1)));
Node* addr = makecon(TypeRawPtr::make((address)&JvmtiVTMSTransitionDisabler::_VTMS_notify_jvmti_events));
Node* notify_jvmti_enabled = ideal.load(ideal.ctrl(), addr, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw);
Node* notify_jvmti_enabled = ideal.load(ideal.ctrl(), addr, TypeInt::BOOL, T_BOOLEAN);

ideal.if_then(notify_jvmti_enabled, BoolTest::eq, ONE); {
sync_kit(ideal);
Expand Down Expand Up @@ -3087,10 +3087,10 @@ bool LibraryCallKit::inline_native_classID() {

__ if_then(kls, BoolTest::ne, null()); {
Node* kls_trace_id_addr = basic_plus_adr(kls, in_bytes(KLASS_TRACE_ID_OFFSET));
Node* kls_trace_id_raw = ideal.load(ideal.ctrl(), kls_trace_id_addr,TypeLong::LONG, T_LONG, Compile::AliasIdxRaw);
Node* kls_trace_id_raw = ideal.load(ideal.ctrl(), kls_trace_id_addr,TypeLong::LONG, T_LONG);

Node* epoch_address = makecon(TypeRawPtr::make(JfrIntrinsicSupport::epoch_address()));
Node* epoch = ideal.load(ideal.ctrl(), epoch_address, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw);
Node* epoch = ideal.load(ideal.ctrl(), epoch_address, TypeInt::BOOL, T_BOOLEAN);
epoch = _gvn.transform(new LShiftLNode(longcon(1), epoch));
Node* mask = _gvn.transform(new LShiftLNode(epoch, intcon(META_SHIFT)));
mask = _gvn.transform(new OrLNode(mask, epoch));
Expand All @@ -3115,7 +3115,7 @@ bool LibraryCallKit::inline_native_classID() {
TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL));
__ if_then(array_kls, BoolTest::ne, null()); {
Node* array_kls_trace_id_addr = basic_plus_adr(array_kls, in_bytes(KLASS_TRACE_ID_OFFSET));
Node* array_kls_trace_id_raw = ideal.load(ideal.ctrl(), array_kls_trace_id_addr, TypeLong::LONG, T_LONG, Compile::AliasIdxRaw);
Node* array_kls_trace_id_raw = ideal.load(ideal.ctrl(), array_kls_trace_id_addr, TypeLong::LONG, T_LONG);
Node* array_kls_trace_id = _gvn.transform(new URShiftLNode(array_kls_trace_id_raw, ideal.ConI(TRACE_ID_SHIFT)));
ideal.set(result, _gvn.transform(new AddLNode(array_kls_trace_id, longcon(1))));
} __ else_(); {
Expand All @@ -3124,9 +3124,9 @@ bool LibraryCallKit::inline_native_classID() {
} __ end_if();

Node* signaled_flag_address = makecon(TypeRawPtr::make(JfrIntrinsicSupport::signal_address()));
Node* signaled = ideal.load(ideal.ctrl(), signaled_flag_address, TypeInt::BOOL, T_BOOLEAN, Compile::AliasIdxRaw, true, MemNode::acquire);
Node* signaled = ideal.load(ideal.ctrl(), signaled_flag_address, TypeInt::BOOL, T_BOOLEAN, true, MemNode::acquire);
__ if_then(signaled, BoolTest::ne, ideal.ConI(1)); {
ideal.store(ideal.ctrl(), signaled_flag_address, ideal.ConI(1), T_BOOLEAN, Compile::AliasIdxRaw, MemNode::release, true);
ideal.store(ideal.ctrl(), signaled_flag_address, ideal.ConI(1), T_BOOLEAN, MemNode::release, true);
} __ end_if();
} __ end_if();

Expand Down
3 changes: 1 addition & 2 deletions src/hotspot/share/opto/macro.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1166,8 +1166,7 @@ bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {

Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
Node* adr = basic_plus_adr(base, offset);
const TypePtr* adr_type = adr->bottom_type()->is_ptr();
Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt, MemNode::unordered);
Node* value = LoadNode::make(_igvn, ctl, mem, adr, value_type, bt, MemNode::unordered);
transform_later(value);
return value;
}
Expand Down
2 changes: 1 addition & 1 deletion src/hotspot/share/opto/macroArrayCopy.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1027,7 +1027,7 @@ bool PhaseMacroExpand::generate_block_arraycopy(Node** ctrl, MergeMemNode** mem,
uint d_alias_idx = C->get_alias_index(adr_type);
bool is_mismatched = (basic_elem_type != T_INT);
Node* sval = transform_later(
LoadNode::make(_igvn, *ctrl, (*mem)->memory_at(s_alias_idx), sptr, s_adr_type,
LoadNode::make(_igvn, *ctrl, (*mem)->memory_at(s_alias_idx), sptr,
TypeInt::INT, T_INT, MemNode::unordered, LoadNode::DependsOnlyOnTest,
false /*require_atomic_access*/, false /*unaligned*/, is_mismatched));
Node* st = transform_later(
Expand Down
9 changes: 5 additions & 4 deletions src/hotspot/share/opto/memnode.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -944,9 +944,10 @@ bool LoadNode::is_immutable_value(Node* adr) {

//----------------------------LoadNode::make-----------------------------------
// Polymorphic factory method:
Node* LoadNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, BasicType bt, MemOrd mo,
Node* LoadNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const Type* rt, BasicType bt, MemOrd mo,
ControlDependency control_dependency, bool require_atomic_access, bool unaligned, bool mismatched, bool unsafe, uint8_t barrier_data) {
Compile* C = gvn.C;
const TypePtr* adr_type = gvn.type(adr)->isa_ptr();

// sanity check the alias category against the created node type
assert(!(adr_type->isa_oopptr() &&
Expand Down Expand Up @@ -1352,7 +1353,7 @@ Node* LoadNode::convert_to_unsigned_load(PhaseGVN& gvn) {
return nullptr;
}
return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),
raw_adr_type(), rt, bt, _mo, _control_dependency,
rt, bt, _mo, _control_dependency,
false /*require_atomic_access*/, is_unaligned_access(), is_mismatched_access());
}

Expand All @@ -1372,7 +1373,7 @@ Node* LoadNode::convert_to_signed_load(PhaseGVN& gvn) {
return nullptr;
}
return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),
raw_adr_type(), rt, bt, _mo, _control_dependency,
rt, bt, _mo, _control_dependency,
false /*require_atomic_access*/, is_unaligned_access(), is_mismatched_access());
}

Expand Down Expand Up @@ -1400,7 +1401,7 @@ Node* LoadNode::convert_to_reinterpret_load(PhaseGVN& gvn, const Type* rt) {
bool require_atomic_access = (op == Op_LoadL && ((LoadLNode*)this)->require_atomic_access()) ||
(op == Op_LoadD && ((LoadDNode*)this)->require_atomic_access());
return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),
raw_adr_type(), rt, bt, _mo, _control_dependency,
rt, bt, _mo, _control_dependency,
require_atomic_access, is_unaligned_access(), is_mismatched);
}

Expand Down
3 changes: 1 addition & 2 deletions src/hotspot/share/opto/memnode.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -233,8 +233,7 @@ class LoadNode : public MemNode {
}

// Polymorphic factory method:
static Node* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
const TypePtr* at, const Type* rt, BasicType bt,
static Node* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr, const Type* rt, BasicType bt,
MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
bool require_atomic_access = false, bool unaligned = false, bool mismatched = false, bool unsafe = false,
uint8_t barrier_data = 0);
Expand Down
Loading