Skip to content
Permalink
Browse files
8267932: [lworld] JIT support for the L/Q model (step 2)
  • Loading branch information
TobiHartmann committed Jul 9, 2021
1 parent 1144cc8 commit a2c8822392153d5c57c11ca99745dc0ab39f9015
Showing 26 changed files with 153 additions and 116 deletions.
@@ -56,7 +56,6 @@ void ciConstant::print() {
case T_DOUBLE:
tty->print("%lf", _value._double);
break;
case T_INLINE_TYPE:
default:
if (is_reference_type(basic_type())) {
_value._object->print();
@@ -693,7 +693,7 @@ ciConstant ciEnv::get_constant_by_index_impl(const constantPoolHandle& cpool,
}
assert (klass->is_instance_klass() || klass->is_array_klass(),
"must be an instance or array klass ");
if (tag.is_unresolved_klass()) {
if (!klass->is_loaded()) {
return ciConstant(T_OBJECT, get_unloaded_klass_mirror(klass));
} else {
if (tag.is_Qdescriptor_klass()) {
@@ -121,6 +121,20 @@ ciInstance* ciInlineKlass::default_instance() const {
)
}

// ------------------------------------------------------------------
// ciInlineKlass::ref_instance
//
// Returns a ciInstance wrapping the mirror obtained from
// InlineKlass::ref_mirror() — presumably the java.lang.Class mirror
// for the reference (L-type) projection of this inline klass; confirm
// against InlineKlass. Runs inside GUARDED_VM_ENTRY because it reads
// a raw oop and resolves it through the current compiler environment.
ciInstance* ciInlineKlass::ref_instance() const {
GUARDED_VM_ENTRY(
oop ref_mirror = to_InlineKlass()->ref_mirror();
return CURRENT_ENV->get_instance(ref_mirror);
)
}

// ------------------------------------------------------------------
// ciInlineKlass::val_instance
//
// Returns a ciInstance wrapping the mirror obtained from
// InlineKlass::val_mirror() — presumably the java.lang.Class mirror
// for the inline (Q-type) projection of this inline klass; confirm
// against InlineKlass. Runs inside GUARDED_VM_ENTRY because it reads
// a raw oop and resolves it through the current compiler environment.
ciInstance* ciInlineKlass::val_instance() const {
GUARDED_VM_ENTRY(
oop val_mirror = to_InlineKlass()->val_mirror();
return CURRENT_ENV->get_instance(val_mirror);
)
}

// ------------------------------------------------------------------
// ciInlineKlass::contains_oops
//
// Delegates to InlineKlass::contains_oops() under a VM transition;
// reports whether the flattened layout contains any oop fields
// (exact semantics defined by InlineKlass — not visible here).
bool ciInlineKlass::contains_oops() const {
GUARDED_VM_ENTRY(return get_InlineKlass()->contains_oops();)
}
@@ -85,6 +85,8 @@ class ciInlineKlass : public ciInstanceKlass {
int inline_arg_slots();
int default_value_offset() const;
ciInstance* default_instance() const;
ciInstance* ref_instance() const;
ciInstance* val_instance() const;
bool contains_oops() const;
int oop_count() const;
address pack_handler() const;
@@ -39,7 +39,7 @@

// ------------------------------------------------------------------
// ciObject::java_mirror_type
ciType* ciInstance::java_mirror_type() {
ciType* ciInstance::java_mirror_type(bool* is_val_mirror) {
VM_ENTRY_MARK;
oop m = get_oop();
// Return NULL if it is not java.lang.Class.
@@ -52,6 +52,9 @@ ciType* ciInstance::java_mirror_type() {
} else {
Klass* k = java_lang_Class::as_Klass(m);
assert(k != NULL, "");
if (is_val_mirror != NULL) {
*is_val_mirror = java_lang_Class::is_secondary_mirror(m);
}
return CURRENT_THREAD_ENV->get_klass(k);
}
}
@@ -55,7 +55,7 @@ class ciInstance : public ciObject {
// If this object is a java mirror, return the corresponding type.
// Otherwise, return NULL.
// (Remember that a java mirror is an instance of java.lang.Class.)
ciType* java_mirror_type();
ciType* java_mirror_type(bool* is_val_mirror = NULL);

// What kind of ciObject is this?
bool is_instance() { return true; }
@@ -630,7 +630,7 @@ ciReturnAddress* ciObjectFactory::get_return_address(int bci) {
}

ciWrapper* ciObjectFactory::make_null_free_wrapper(ciType* type) {
ciWrapper* wrapper = new (arena()) ciWrapper(type, /* null_free */ true);
ciWrapper* wrapper = new (arena()) ciWrapper(type);
init_ident_of(wrapper);
return wrapper;
}
@@ -72,7 +72,7 @@ class ciType : public ciMetadata {
bool is_type() const { return true; }
bool is_classless() const { return is_primitive_type(); }

virtual ciType* unwrap() { return this; }
virtual ciType* unwrap() { return this; }
virtual bool is_null_free() const { return false; }

const char* name();
@@ -112,33 +112,30 @@ class ciReturnAddress : public ciType {

// ciWrapper
//
// This class wraps another type to carry additional information like nullability.
// Should only be instantiated and used by ciTypeFlow and ciSignature.
// This class wraps another type to carry additional information.
// Currently it is only used to mark inline klasses as null-free.
class ciWrapper : public ciType {
CI_PACKAGE_ACCESS

private:
ciType* _type;
bool _null_free;

ciWrapper(ciType* type, bool null_free) : ciType(type->basic_type()) {
ciWrapper(ciType* type) : ciType(type->basic_type()) {
assert(type->is_inlinetype()
// An unloaded inline type is an instance_klass (see ciEnv::get_klass_by_name_impl())
|| (type->is_instance_klass() && !type->is_loaded()),
"should only be used for inline types");
_type = type;
_null_free = null_free;
}

const char* type_string() { return "ciWrapper"; }

void print_impl(outputStream* st) { _type->print_impl(st); }

public:
bool is_wrapper() const { return true; }

ciType* unwrap() { return _type; }
bool is_null_free() const { return _null_free; }
bool is_wrapper() const { return true; }
ciType* unwrap() { return _type; }
bool is_null_free() const { return true; }
};

#endif // SHARE_CI_CITYPE_HPP
@@ -350,7 +350,7 @@ ciType* ciTypeFlow::StateVector::type_meet_internal(ciType* t1, ciType* t2, ciTy
assert(k1->is_instance_klass(), "previous cases handle non-instances");
assert(k2->is_instance_klass(), "previous cases handle non-instances");
ciType* result = k1->least_common_ancestor(k2);
if (null_free1 && null_free2) {
if (null_free1 && null_free2 && result->is_inlinetype()) {
result = analyzer->mark_as_null_free(result);
}
return result;
@@ -637,6 +637,10 @@ void ciTypeFlow::StateVector::do_checkcast(ciBytecodeStream* str) {
}
} else {
ciType* type = pop_value();
if (type->unwrap() != klass && type->unwrap()->is_subtype_of(klass)) {
// Useless cast, propagate more precise type of object
klass = type->unwrap()->as_klass();
}
if (klass->is_inlinetype() && (null_free || type->is_null_free())) {
push(outer()->mark_as_null_free(klass));
} else {
@@ -1579,7 +1583,7 @@ bool ciTypeFlow::StateVector::apply_one_bytecode(ciBytecodeStream* str) {
// ------------------------------------------------------------------
// ciTypeFlow::StateVector::print_cell_on
void ciTypeFlow::StateVector::print_cell_on(outputStream* st, Cell c) const {
ciType* type = type_at(c);
ciType* type = type_at(c)->unwrap();
if (type == top_type()) {
st->print("top");
} else if (type == bottom_type()) {
@@ -345,6 +345,8 @@ class java_lang_Class : AllStatic {
static int klass_offset() { CHECK_INIT(_klass_offset); }
static int array_klass_offset() { CHECK_INIT(_array_klass_offset); }
static int component_mirror_offset() { CHECK_INIT(_component_mirror_offset); }
static int primary_mirror_offset() { CHECK_INIT(_primary_mirror_offset); }
static int secondary_mirror_offset() { CHECK_INIT(_secondary_mirror_offset); }
// Support for classRedefinedCount field
static int classRedefinedCount(oop the_class_mirror);
static void set_classRedefinedCount(oop the_class_mirror, int value);
@@ -267,6 +267,9 @@ void ZBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* a
// This code will be unreachable
assert(StressReflectiveCode, "Guard against surprises");
bt = T_LONG;
} else if (ary_ptr->is_flat()) {
// Clone flat inline type array
bt = T_LONG;
} else {
bt = ary_ptr->elem()->array_element_basic_type();
if (is_reference_type(bt)) {
@@ -733,9 +733,15 @@ void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint
// return result(s) along with their RegMask info
Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
uint con = proj->_con;
const TypeTuple *range_cc = tf()->range_cc();
const TypeTuple* range_cc = tf()->range_cc();
if (con >= TypeFunc::Parms) {
if (is_CallRuntime()) {
if (tf()->returns_inline_type_as_fields()) {
// The call returns multiple values (inline type fields): we
// create one projection per returned value.
assert(con <= TypeFunc::Parms+1 || InlineTypeReturnedAsFields, "only for multi value return");
uint ideal_reg = range_cc->field_at(con)->ideal_reg();
return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg);
} else {
if (con == TypeFunc::Parms) {
uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg();
OptoRegPair regs = Opcode() == Op_CallLeafVector
@@ -763,12 +769,6 @@ Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask*
assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, "");
return new MachProjNode(this,con, RegMask::Empty, (uint)OptoReg::Bad);
}
} else {
// The Call may return multiple values (inline type fields): we
// create one projection per returned value.
assert(con <= TypeFunc::Parms+1 || InlineTypeReturnedAsFields, "only for multi value return");
uint ideal_reg = range_cc->field_at(con)->ideal_reg();
return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg);
}
}

@@ -3680,6 +3680,13 @@ Node* GraphKit::inline_type_test(Node* obj, bool is_inline) {
return _gvn.transform(new BoolNode(cmp, is_inline ? BoolTest::eq : BoolTest::ne));
}

// Emits IR that tests whether 'mirror' is a val (secondary) mirror:
// loads the secondary_mirror field out of the java.lang.Class object
// at java_lang_Class::secondary_mirror_offset() and compares it by
// pointer identity against 'mirror' itself. Returns a BoolNode that
// is true (BoolTest::eq) when mirror == mirror.secondary_mirror,
// i.e. — per the caller at inline_Class_cast — when the constant-less
// mirror denotes the QMyValue projection. No control flow is added;
// callers wrap the result in a guard (see generate_fair_guard uses).
Node* GraphKit::is_val_mirror(Node* mirror) {
Node* p = basic_plus_adr(mirror, java_lang_Class::secondary_mirror_offset());
// Plain IN_HEAP oop load; type is a possibly-null java.lang.Class
// (MIRROR cast to BotPTR), since non-inline klasses presumably have a
// null secondary mirror — TODO confirm against java_lang_Class.
Node* secondary_mirror = access_load_at(mirror, p, _gvn.type(p)->is_ptr(), TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR), T_OBJECT, IN_HEAP);
Node* cmp = _gvn.transform(new CmpPNode(mirror, secondary_mirror));
return _gvn.transform(new BoolNode(cmp, BoolTest::eq));
}

Node* GraphKit::array_lh_test(Node* klass, jint mask, jint val, bool eq) {
Node* lh_adr = basic_plus_adr(klass, in_bytes(Klass::layout_helper_offset()));
// Make sure to use immutable memory here to enable hoisting the check out of loops
@@ -869,6 +869,7 @@ class GraphKit : public Phase {

// Inline types
Node* inline_type_test(Node* obj, bool is_inline = true);
Node* is_val_mirror(Node* mirror);
Node* array_lh_test(Node* kls, jint mask, jint val, bool eq = true);
Node* flat_array_test(Node* ary, bool flat = true);
Node* null_free_array_test(Node* klass, bool null_free = true);
@@ -2516,7 +2516,7 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c
p = gvn().transform(new CastP2XNode(NULL, p));
p = ConvX2UL(p);
}
if (field != NULL && field->type()->is_inlinetype() && !field->is_flattened()) {
if (field != NULL && field->is_null_free() && !field->is_flattened()) {
// Load a non-flattened inline type from memory
if (value_type->inline_klass()->is_scalarizable()) {
p = InlineTypeNode::make_from_oop(this, p, value_type->inline_klass());
@@ -3155,10 +3155,6 @@ Node* LibraryCallKit::generate_hidden_class_guard(Node* kls, RegionNode* region)
return generate_access_flags_guard(kls, JVM_ACC_IS_HIDDEN_CLASS, 0, region);
}

Node* LibraryCallKit::generate_value_guard(Node* kls, RegionNode* region) {
return generate_access_flags_guard(kls, JVM_ACC_INLINE, 0, region);
}

//-------------------------inline_native_Class_query-------------------
bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
const Type* return_type = TypeInt::BOOL;
@@ -3367,15 +3363,14 @@ bool LibraryCallKit::inline_Class_cast() {
// First, see if Class.cast() can be folded statically.
// java_mirror_type() returns non-null for compile-time Class constants.
bool requires_null_check = false;
ciType* tm = mirror_con->java_mirror_type();
ciType* tm = mirror_con->java_mirror_type(&requires_null_check);
// Check for null if casting to QMyValue
requires_null_check &= !obj->is_InlineType();
if (tm != NULL && tm->is_klass() && obj_klass != NULL) {
if (!obj_klass->is_loaded()) {
// Don't use intrinsic when class is not loaded.
return false;
} else {
// Check for null if casting to .val
requires_null_check = !obj->is_InlineType() && tm->as_klass()->is_inlinetype();

int static_res = C->static_subtype_check(tm->as_klass(), obj_klass);
if (static_res == Compile::SSC_always_true) {
// isInstance() is true - fold the code.
@@ -3424,20 +3419,22 @@ bool LibraryCallKit::inline_Class_cast() {
Node* res = top();
if (!stopped()) {
if (EnableValhalla && !obj->is_InlineType() && !requires_null_check) {
// Check if we are casting to .val
Node* is_val_kls = generate_value_guard(kls, NULL);
if (is_val_kls != NULL) {
// Check if we are casting to QMyValue
Node* ctrl_val_mirror = generate_fair_guard(is_val_mirror(mirror), NULL);
if (ctrl_val_mirror != NULL) {
RegionNode* r = new RegionNode(3);
record_for_igvn(r);
r->init_req(1, control());

// Casting to .val, check for null
set_control(is_val_kls);
Node* null_ctr = top();
null_check_oop(obj, &null_ctr);
region->init_req(_npe_path, null_ctr);
r->init_req(2, control());

// Casting to QMyValue, check for null
set_control(ctrl_val_mirror);
{ // PreserveJVMState because null check replaces obj in map
PreserveJVMState pjvms(this);
Node* null_ctr = top();
null_check_oop(obj, &null_ctr);
region->init_req(_npe_path, null_ctr);
r->init_req(2, control());
}
set_control(_gvn.transform(r));
}
}
@@ -3528,6 +3525,9 @@ bool LibraryCallKit::inline_native_subtype_check() {
Node* subk = klasses[1]; // the argument to isAssignableFrom
Node* superk = klasses[0]; // the receiver
region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
// If superc is an inline mirror, we also need to check if superc == subc because LMyValue
// is not a subtype of QMyValue but due to subk == superk the subtype check will pass.
generate_fair_guard(is_val_mirror(args[0]), prim_region);
// now we have a successful reference subtype check
region->set_req(_ref_subtype_path, control());
}
@@ -4540,7 +4540,7 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {

BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
const TypeAryPtr* ary_ptr = obj_type->isa_aryptr();
if (UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing) &&
if (UseFlatArray && bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Expansion) &&
obj_type->klass()->can_be_inline_array_klass() &&
(ary_ptr == NULL || (!ary_ptr->is_not_flat() && (!ary_ptr->is_flat() || ary_ptr->elem()->inline_klass()->contains_oops())))) {
// Flattened inline type array may have object field that would require a
@@ -165,7 +165,6 @@ class LibraryCallKit : public GraphKit {
int modifier_mask, int modifier_bits,
RegionNode* region);
Node* generate_interface_guard(Node* kls, RegionNode* region);
Node* generate_value_guard(Node* kls, RegionNode* region);

enum ArrayKind {
AnyArray,

0 comments on commit a2c8822

Please sign in to comment.