8285301: C2: assert(!requires_atomic_access) failed: can't ensure atomicity #454
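This change removes the dedicated atomic factory methods (LoadLNode::make_atomic, LoadDNode::make_atomic, StoreLNode::make_atomic, StoreDNode::make_atomic) and instead threads a require_atomic_access flag through the polymorphic LoadNode::make and StoreNode::make factories. That also lets the opt-access paths in BarrierSetC2 honor atomicity, so the assert(!requires_atomic_access) guards, including the one in the bug title, can be dropped. A condensed before/after sketch of the caller pattern (taken from the IdealKit::load hunk below; a sketch, not a compilable excerpt):

// Before: callers dispatched by hand to the atomic factories.
Node* ld;
if (require_atomic_access && bt == T_LONG) {
  ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo);
} else {
  ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo);
}

// After: the flag goes through the single polymorphic factory, which
// constructs a LoadLNode/LoadDNode with _require_atomic_access set.
Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo,
                          LoadNode::DependsOnlyOnTest, require_atomic_access);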

@@ -91,19 +91,19 @@ Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const
MemNode::MemOrd mo = access.mem_node_mo();

Node* store;
+ BasicType bt = access.type();
if (access.is_parse_access()) {
C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);

GraphKit* kit = parse_access.kit();
- if (access.type() == T_DOUBLE) {
+ if (bt == T_DOUBLE) {
Node* new_val = kit->dstore_rounding(val.node());
val.set_node(new_val);
}

- store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), access.type(),
- access.addr().type(), mo, requires_atomic_access, unaligned, mismatched, unsafe);
+ store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), bt,
+ access.addr().type(), mo, requires_atomic_access, unaligned, mismatched, unsafe);
} else {
assert(!requires_atomic_access, "not yet supported");
assert(access.is_opt_access(), "either parse or opt access");
C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
Node* ctl = opt_access.ctl();
@@ -113,7 +113,7 @@ Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const
int alias = gvn.C->get_alias_index(adr_type);
Node* mem = mm->memory_at(alias);

- StoreNode* st = StoreNode::make(gvn, ctl, mem, access.addr().node(), adr_type, val.node(), access.type(), mo);
+ StoreNode* st = StoreNode::make(gvn, ctl, mem, access.addr().node(), adr_type, val.node(), bt, mo, requires_atomic_access);
if (unaligned) {
st->set_unaligned_access();
}
@@ -156,28 +156,26 @@ Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const
Node* control = control_dependent ? kit->control() : NULL;

if (immutable) {
assert(!requires_atomic_access, "can't ensure atomicity");
Compile* C = Compile::current();
Node* mem = kit->immutable_memory();
load = LoadNode::make(kit->gvn(), control, mem, adr,
- adr_type, val_type, access.type(), mo, dep, unaligned,
- mismatched, unsafe, access.barrier_data());
+ adr_type, val_type, access.type(), mo, dep, requires_atomic_access,
+ unaligned, mismatched, unsafe, access.barrier_data());
load = kit->gvn().transform(load);
} else {
load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
dep, requires_atomic_access, unaligned, mismatched, unsafe,
access.barrier_data());
}
} else {
- assert(!requires_atomic_access, "not yet supported");
assert(access.is_opt_access(), "either parse or opt access");
C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
Node* control = control_dependent ? opt_access.ctl() : NULL;
MergeMemNode* mm = opt_access.mem();
PhaseGVN& gvn = opt_access.gvn();
Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));
- load = LoadNode::make(gvn, control, mem, adr, adr_type, val_type, access.type(), mo,
- dep, unaligned, mismatched, unsafe, access.barrier_data());
+ load = LoadNode::make(gvn, control, mem, adr, adr_type, val_type, access.type(), mo, dep,
+ requires_atomic_access, unaligned, mismatched, unsafe, access.barrier_data());
load = gvn.transform(load);
}
access.set_raw_access(load);
@@ -1537,14 +1537,7 @@ Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
const TypePtr* adr_type = NULL; // debug-mode-only argument
debug_only(adr_type = C->get_adr_type(adr_idx));
Node* mem = memory(adr_idx);
- Node* ld;
- if (require_atomic_access && bt == T_LONG) {
- ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
- } else if (require_atomic_access && bt == T_DOUBLE) {
- ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
- } else {
- ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
- }
+ Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
ld = _gvn.transform(ld);
if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
// Improve graph before escape analysis and boxing elimination.
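(With the new LoadNode::make signature, require_atomic_access sits between control_dependency and unaligned, so callers passing positional booleans have to be updated; the PhaseMacroExpand::generate_block_arraycopy hunk below does exactly that. For illustration only, a hypothetical caller requesting an atomic long load through the updated GraphKit::make_load, sketched from the signatures shown in this diff:

Node* v = kit->make_load(kit->control(), adr, TypeLong::LONG, T_LONG, adr_type,
                         MemNode::unordered, LoadNode::DependsOnlyOnTest,
                         true /*require_atomic_access*/);

On platforms where 64-bit memory accesses are not naturally atomic, the resulting LoadLNode keeps _require_atomic_access set so that matching can pick an atomic instruction sequence.)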
@@ -1564,14 +1557,7 @@ Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
const TypePtr* adr_type = NULL;
debug_only(adr_type = C->get_adr_type(adr_idx));
Node *mem = memory(adr_idx);
- Node* st;
- if (require_atomic_access && bt == T_LONG) {
- st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
- } else if (require_atomic_access && bt == T_DOUBLE) {
- st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
- } else {
- st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
- }
+ Node* st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo, require_atomic_access);
if (unaligned) {
st->as_Store()->set_unaligned_access();
}
@@ -358,12 +358,7 @@ Node* IdealKit::load(Node* ctl,
const TypePtr* adr_type = NULL; // debug-mode-only argument
debug_only(adr_type = C->get_adr_type(adr_idx));
Node* mem = memory(adr_idx);
- Node* ld;
- if (require_atomic_access && bt == T_LONG) {
- ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo);
- } else {
- ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo);
- }
+ Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, LoadNode::DependsOnlyOnTest, require_atomic_access);
return transform(ld);
}

@@ -375,12 +370,7 @@ Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt,
const TypePtr* adr_type = NULL;
debug_only(adr_type = C->get_adr_type(adr_idx));
Node *mem = memory(adr_idx);
- Node* st;
- if (require_atomic_access && bt == T_LONG) {
- st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
- } else {
- st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
- }
+ Node* st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo, require_atomic_access);
if (mismatched) {
st->as_Store()->set_mismatched_access();
}
@@ -1017,7 +1017,7 @@ bool PhaseMacroExpand::generate_block_arraycopy(Node** ctrl, MergeMemNode** mem,
Node* sval = transform_later(
LoadNode::make(_igvn, *ctrl, (*mem)->memory_at(s_alias_idx), sptr, s_adr_type,
TypeInt::INT, T_INT, MemNode::unordered, LoadNode::DependsOnlyOnTest,
- false /*unaligned*/, is_mismatched));
+ false /*require_atomic_access*/, false /*unaligned*/, is_mismatched));
Node* st = transform_later(
StoreNode::make(_igvn, *ctrl, (*mem)->memory_at(d_alias_idx), dptr, adr_type,
sval, T_INT, MemNode::unordered));
@@ -861,8 +861,8 @@ bool LoadNode::is_immutable_value(Node* adr) {

//----------------------------LoadNode::make-----------------------------------
// Polymorphic factory method:
- Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo,
- ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe, uint8_t barrier_data) {
+ Node* LoadNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, BasicType bt, MemOrd mo,
+ ControlDependency control_dependency, bool require_atomic_access, bool unaligned, bool mismatched, bool unsafe, uint8_t barrier_data) {
Compile* C = gvn.C;

// sanity check the alias category against the created node type
@@ -884,9 +884,9 @@ Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypeP
case T_INT: load = new LoadINode (ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency); break;
case T_CHAR: load = new LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency); break;
case T_SHORT: load = new LoadSNode (ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency); break;
- case T_LONG: load = new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency); break;
+ case T_LONG: load = new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency, require_atomic_access); break;
case T_FLOAT: load = new LoadFNode (ctl, mem, adr, adr_type, rt, mo, control_dependency); break;
- case T_DOUBLE: load = new LoadDNode (ctl, mem, adr, adr_type, rt, mo, control_dependency); break;
+ case T_DOUBLE: load = new LoadDNode (ctl, mem, adr, adr_type, rt, mo, control_dependency, require_atomic_access); break;
case T_ADDRESS: load = new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(), mo, control_dependency); break;
case T_OBJECT:
#ifdef _LP64
@@ -922,42 +922,6 @@ Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypeP
return load;
}

- LoadLNode* LoadLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo,
- ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe, uint8_t barrier_data) {
- bool require_atomic = true;
- LoadLNode* load = new LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency, require_atomic);
- if (unaligned) {
- load->set_unaligned_access();
- }
- if (mismatched) {
- load->set_mismatched_access();
- }
- if (unsafe) {
- load->set_unsafe_access();
- }
- load->set_barrier_data(barrier_data);
- return load;
- }

- LoadDNode* LoadDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo,
- ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe, uint8_t barrier_data) {
- bool require_atomic = true;
- LoadDNode* load = new LoadDNode(ctl, mem, adr, adr_type, rt, mo, control_dependency, require_atomic);
- if (unaligned) {
- load->set_unaligned_access();
- }
- if (mismatched) {
- load->set_mismatched_access();
- }
- if (unsafe) {
- load->set_unsafe_access();
- }
- load->set_barrier_data(barrier_data);
- return load;
- }



//------------------------------hash-------------------------------------------
uint LoadNode::hash() const {
// unroll addition of interesting fields
@@ -1288,7 +1252,7 @@ Node* LoadNode::convert_to_unsigned_load(PhaseGVN& gvn) {
}
return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),
raw_adr_type(), rt, bt, _mo, _control_dependency,
- is_unaligned_access(), is_mismatched_access());
+ false /*require_atomic_access*/, is_unaligned_access(), is_mismatched_access());
}

// Construct an equivalent signed load.
@@ -1308,7 +1272,7 @@ Node* LoadNode::convert_to_signed_load(PhaseGVN& gvn) {
}
return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),
raw_adr_type(), rt, bt, _mo, _control_dependency,
- is_unaligned_access(), is_mismatched_access());
+ false /*require_atomic_access*/, is_unaligned_access(), is_mismatched_access());
}

bool LoadNode::has_reinterpret_variant(const Type* rt) {
@@ -1331,9 +1295,12 @@ Node* LoadNode::convert_to_reinterpret_load(PhaseGVN& gvn, const Type* rt) {
if (raw_type == NULL) {
is_mismatched = true; // conservatively match all non-raw accesses as mismatched
}
+ const int op = Opcode();
+ bool require_atomic_access = (op == Op_LoadL && ((LoadLNode*)this)->require_atomic_access()) ||
+ (op == Op_LoadD && ((LoadDNode*)this)->require_atomic_access());
return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),
raw_adr_type(), rt, bt, _mo, _control_dependency,
- is_unaligned_access(), is_mismatched);
+ require_atomic_access, is_unaligned_access(), is_mismatched);
}

bool StoreNode::has_reinterpret_variant(const Type* vt) {
@@ -1351,7 +1318,11 @@ bool StoreNode::has_reinterpret_variant(const Type* vt) {
Node* StoreNode::convert_to_reinterpret_store(PhaseGVN& gvn, Node* val, const Type* vt) {
BasicType bt = vt->basic_type();
assert(has_reinterpret_variant(vt), "no reinterpret variant: %s %s", Name(), type2name(bt));
- StoreNode* st = StoreNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), raw_adr_type(), val, bt, _mo);
+ const int op = Opcode();
+ bool require_atomic_access = (op == Op_StoreL && ((StoreLNode*)this)->require_atomic_access()) ||
+ (op == Op_StoreD && ((StoreDNode*)this)->require_atomic_access());
+ StoreNode* st = StoreNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),
+ raw_adr_type(), val, bt, _mo, require_atomic_access);

bool is_mismatched = is_mismatched_access();
const TypeRawPtr* raw_type = gvn.type(in(MemNode::Memory))->isa_rawptr();
@@ -2588,7 +2559,7 @@ Node* LoadRangeNode::Identity(PhaseGVN* phase) {
//=============================================================================
//---------------------------StoreNode::make-----------------------------------
// Polymorphic factory method:
- StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) {
+ StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo, bool require_atomic_access) {
assert((mo == unordered || mo == release), "unexpected");
Compile* C = gvn.C;
assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
@@ -2600,9 +2571,9 @@ StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const
case T_INT: return new StoreINode(ctl, mem, adr, adr_type, val, mo);
case T_CHAR:
case T_SHORT: return new StoreCNode(ctl, mem, adr, adr_type, val, mo);
- case T_LONG: return new StoreLNode(ctl, mem, adr, adr_type, val, mo);
+ case T_LONG: return new StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic_access);
case T_FLOAT: return new StoreFNode(ctl, mem, adr, adr_type, val, mo);
- case T_DOUBLE: return new StoreDNode(ctl, mem, adr, adr_type, val, mo);
+ case T_DOUBLE: return new StoreDNode(ctl, mem, adr, adr_type, val, mo, require_atomic_access);
case T_METADATA:
case T_ADDRESS:
case T_OBJECT:
@@ -2626,17 +2597,6 @@ StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const
}
}

- StoreLNode* StoreLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
- bool require_atomic = true;
- return new StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
- }

- StoreDNode* StoreDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
- bool require_atomic = true;
- return new StoreDNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
- }


//--------------------------bottom_type----------------------------------------
const Type *StoreNode::bottom_type() const {
return Type::MEMORY;
@@ -226,10 +226,10 @@ class LoadNode : public MemNode {
}

// Polymorphic factory method:
- static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
- const TypePtr* at, const Type *rt, BasicType bt,
+ static Node* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
+ const TypePtr* at, const Type* rt, BasicType bt,
MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
- bool unaligned = false, bool mismatched = false, bool unsafe = false,
+ bool require_atomic_access = false, bool unaligned = false, bool mismatched = false, bool unsafe = false,
uint8_t barrier_data = 0);

virtual uint hash() const; // Check the type
@@ -415,9 +415,7 @@ class LoadLNode : public LoadNode {
virtual int store_Opcode() const { return Op_StoreL; }
virtual BasicType memory_type() const { return T_LONG; }
bool require_atomic_access() const { return _require_atomic_access; }
- static LoadLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
- const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
- bool unaligned = false, bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0);

#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const {
LoadNode::dump_spec(st);
@@ -467,9 +465,7 @@ class LoadDNode : public LoadNode {
virtual int store_Opcode() const { return Op_StoreD; }
virtual BasicType memory_type() const { return T_DOUBLE; }
bool require_atomic_access() const { return _require_atomic_access; }
- static LoadDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
- const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
- bool unaligned = false, bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0);

#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const {
LoadNode::dump_spec(st);
@@ -610,8 +606,9 @@ class StoreNode : public MemNode {
// procedure must indicate that the store requires `release'
// semantics, if the stored value is an object reference that might
// point to a new object and may become externally visible.
- static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
- const TypePtr* at, Node *val, BasicType bt, MemOrd mo);
+ static StoreNode* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
+ const TypePtr* at, Node* val, BasicType bt,
+ MemOrd mo, bool require_atomic_access = false);

virtual uint hash() const; // Check the type

@@ -692,7 +689,7 @@ class StoreLNode : public StoreNode {
virtual int Opcode() const;
virtual BasicType memory_type() const { return T_LONG; }
bool require_atomic_access() const { return _require_atomic_access; }
- static StoreLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);

#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const {
StoreNode::dump_spec(st);
@@ -728,7 +725,7 @@ class StoreDNode : public StoreNode {
virtual int Opcode() const;
virtual BasicType memory_type() const { return T_DOUBLE; }
bool require_atomic_access() const { return _require_atomic_access; }
- static StoreDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);

#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const {
StoreNode::dump_spec(st);
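
The store side mirrors this. With StoreLNode::make_atomic and StoreDNode::make_atomic gone, an atomic 64-bit store is requested straight through the polymorphic factory, per the StoreNode::make declaration above; a minimal sketch:

StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_LONG,
                                MemNode::unordered, true /*require_atomic_access*/);

This yields a StoreLNode with _require_atomic_access set, exactly where the removed StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo) was used before.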