Skip to content
Permalink
Browse files
8266504: Remove leftovers from BarrierSetAssemblerC1
Reviewed-by: thartmann
  • Loading branch information
rkennke committed May 5, 2021
1 parent 6018336 commit 1885c83aca4f7bae43c5dfb9de185a4253d9fe2b
@@ -968,10 +968,6 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
index = tmp;
}

if (is_updateBytes) {
base_op = access_resolve(ACCESS_READ, base_op);
}

if (offset) {
LIR_Opr tmp = new_pointer_register();
__ add(base_op, LIR_OprFact::intConst(offset), tmp);
@@ -1050,10 +1046,6 @@ void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
index = tmp;
}

if (is_updateBytes) {
base_op = access_resolve(ACCESS_READ, base_op);
}

if (offset) {
LIR_Opr tmp = new_pointer_register();
__ add(base_op, LIR_OprFact::intConst(offset), tmp);
@@ -1066,10 +1066,6 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
}
#endif

if (is_updateBytes) {
base_op = access_resolve(IS_NOT_NULL | ACCESS_READ, base_op);
}

LIR_Address* a = new LIR_Address(base_op,
index,
offset,
@@ -1127,15 +1123,15 @@ void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
constant_aOffset = result_aOffset->as_jlong();
result_aOffset = LIR_OprFact::illegalOpr;
}
LIR_Opr result_a = access_resolve(ACCESS_READ, a.result());
LIR_Opr result_a = a.result();

long constant_bOffset = 0;
LIR_Opr result_bOffset = bOffset.result();
if (result_bOffset->is_constant()) {
constant_bOffset = result_bOffset->as_jlong();
result_bOffset = LIR_OprFact::illegalOpr;
}
LIR_Opr result_b = access_resolve(ACCESS_READ, b.result());
LIR_Opr result_b = b.result();

#ifndef _LP64
result_a = new_register(T_INT);
@@ -1793,15 +1793,6 @@ LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType ty
}
}

// Resolve an object operand through the active GC barrier set before it is
// used to form an address. When the caller gave no read/write decorator,
// widen to the stronger ACCESS_READ | ACCESS_WRITE combination so the
// barrier set never sees an under-specified access.
LIR_Opr LIRGenerator::access_resolve(DecoratorSet decorators, LIR_Opr obj) {
  const DecoratorSet rw_mask = ACCESS_READ | ACCESS_WRITE;
  if ((decorators & rw_mask) == 0) {
    // Neither direction was specified: default to both.
    decorators |= rw_mask;
  }
  return _barrier_set->resolve(this, decorators, obj);
}

void LIRGenerator::do_LoadField(LoadField* x) {
bool needs_patching = x->needs_patching();
bool is_volatile = x->field()->is_volatile();
@@ -1879,12 +1870,11 @@ void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
if (GenerateRangeChecks) {
CodeEmitInfo* info = state_for(x);
CodeStub* stub = new RangeCheckStub(info, index.result());
LIR_Opr buf_obj = access_resolve(IS_NOT_NULL | ACCESS_READ, buf.result());
if (index.result()->is_constant()) {
cmp_mem_int(lir_cond_belowEqual, buf_obj, java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
__ branch(lir_cond_belowEqual, stub);
} else {
cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf_obj,
cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
java_nio_Buffer::limit_offset(), T_INT, info);
__ branch(lir_cond_aboveEqual, stub);
}
@@ -301,8 +301,6 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
LIR_Opr access_atomic_add_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIRItem& offset, LIRItem& value);

LIR_Opr access_resolve(DecoratorSet decorators, LIR_Opr obj);

// These need to guarantee JMM volatile semantics are preserved on each platform
// and requires one implementation per architecture.
LIR_Opr atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value);
@@ -296,12 +296,6 @@ const char* Runtime1::name_for_address(address entry) {
if (entry == entry_for((StubID)id)) return name_for((StubID)id);
}

BarrierSetC1* bsc1 = BarrierSet::barrier_set()->barrier_set_c1();
const char* name = bsc1->rtcall_name_for_address(entry);
if (name != NULL) {
return name;
}

#define FUNCTION_CASE(a, f) \
if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f)) return #f

@@ -343,7 +343,3 @@ void BarrierSetC1::generate_referent_check(LIRAccess& access, LabelObj* cont) {
}
}
}

// Base-class resolve hook: the default barrier set needs no access
// resolution, so the operand is handed back unchanged. GC-specific
// subclasses may override this to interpose a barrier.
LIR_Opr BarrierSetC1::resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj) {
  // No-op by default; decorators and generator are intentionally unused here.
  return obj;
}
@@ -135,10 +135,6 @@ class BarrierSetC1: public CHeapObj<mtGC> {
virtual LIR_Opr atomic_xchg_at(LIRAccess& access, LIRItem& value);
virtual LIR_Opr atomic_add_at(LIRAccess& access, LIRItem& value);

virtual LIR_Opr resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj);

virtual const char* rtcall_name_for_address(address entry) { return NULL; }

virtual void generate_c1_runtime_stubs(BufferBlob* buffer_blob) {}
};

0 comments on commit 1885c83

Please sign in to comment.