[vm/aot-switchable-calls] Introduce single runtime entry for all switchable calls

Bug: dart-lang/sdk#37835
Bug: dart-lang/sdk#36097
Change-Id: I0198fd0328945b04e4f2254bacac25b41038e78c
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/138361
Commit-Queue: Alexander Aprelev <aam@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
aam authored and commit-bot@chromium.org committed Apr 4, 2020
1 parent 2e0bec3 commit 80ae6ed
Showing 35 changed files with 1,146 additions and 1,139 deletions.
4 changes: 2 additions & 2 deletions runtime/vm/class_finalizer.cc
@@ -1757,9 +1757,9 @@ void ClassFinalizer::ClearAllCode(bool including_nonchanging_cids) {
object_store->set_build_method_extractor_code(null_code);

auto& miss_function =
- Function::Handle(zone, object_store->megamorphic_miss_function());
+ Function::Handle(zone, object_store->megamorphic_call_miss_function());
miss_function.ClearCode();
- object_store->SetMegamorphicMissHandler(null_code, miss_function);
+ object_store->SetMegamorphicCallMissHandler(null_code, miss_function);
}
#endif // !DART_PRECOMPILED_RUNTIME
}
1 change: 1 addition & 0 deletions runtime/vm/class_id.h
@@ -41,6 +41,7 @@ namespace dart {
V(SingleTargetCache) \
V(UnlinkedCall) \
V(MonomorphicSmiableCall) \
+ V(CallSiteData) \
V(ICData) \
V(MegamorphicCache) \
V(SubtypeTestCache) \
20 changes: 13 additions & 7 deletions runtime/vm/clustered_snapshot.cc
@@ -5442,8 +5442,8 @@ void Serializer::AddVMIsolateBaseObjects() {

ClassTable* table = isolate()->class_table();
for (intptr_t cid = kClassCid; cid < kInstanceCid; cid++) {
- // Error has no class object.
- if (cid != kErrorCid) {
+ // Error, CallSiteData has no class object.
+ if (cid != kErrorCid && cid != kCallSiteDataCid) {
ASSERT(table->HasValidClassAt(cid));
AddBaseObject(table->At(cid), "Class");
}
@@ -6109,8 +6109,8 @@ void Deserializer::AddVMIsolateBaseObjects() {

ClassTable* table = isolate()->class_table();
for (intptr_t cid = kClassCid; cid <= kUnwindErrorCid; cid++) {
- // Error has no class object.
- if (cid != kErrorCid) {
+ // Error, CallSiteData has no class object.
+ if (cid != kErrorCid && cid != kCallSiteDataCid) {
ASSERT(table->HasValidClassAt(cid));
AddBaseObject(table->At(cid));
}
@@ -6550,7 +6550,7 @@ RawApiError* FullSnapshotReader::ReadIsolateSnapshot() {
if (FLAG_use_bare_instructions) {
// By default, every switchable call site will put (ic_data, code) into the
// object pool. The [code] is initialized (at AOT compile-time) to be a
- // [StubCode::UnlinkedCall].
+ // [StubCode::SwitchableCallMiss].
//
// In --use-bare-instruction we reduce the extra indirection via the [code]
// object and store instead (ic_data, entrypoint) in the object pool.
@@ -6565,9 +6565,15 @@ RawApiError* FullSnapshotReader::ReadIsolateSnapshot() {
for (intptr_t i = 0; i < pool.Length(); i++) {
if (pool.TypeAt(i) == ObjectPool::EntryType::kTaggedObject) {
entry = pool.ObjectAt(i);
- if (entry.raw() == StubCode::UnlinkedCall().raw()) {
+ if (entry.raw() == StubCode::SwitchableCallMiss().raw()) {
smi = Smi::FromAlignedAddress(
- StubCode::UnlinkedCall().MonomorphicEntryPoint());
+ StubCode::SwitchableCallMiss().MonomorphicEntryPoint());
pool.SetTypeAt(i, ObjectPool::EntryType::kImmediate,
ObjectPool::Patchability::kPatchable);
pool.SetObjectAt(i, smi);
+ } else if (entry.raw() == StubCode::MegamorphicCall().raw()) {
+ smi = Smi::FromAlignedAddress(
+ StubCode::MegamorphicCall().MonomorphicEntryPoint());
+ pool.SetTypeAt(i, ObjectPool::EntryType::kImmediate,
+ ObjectPool::Patchability::kPatchable);
+ pool.SetObjectAt(i, smi);
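The hunk above implements the optimization described in its comment: at snapshot load time, object-pool slots that hold the SwitchableCallMiss or MegamorphicCall stub are rewritten from tagged Code references into raw entry-point immediates, so bare-instructions call sites jump directly instead of indirecting through a Code object. Below is a minimal standalone sketch of that rewrite, not VM code; the Stub type, the PoolEntry representation, and the addresses are illustrative assumptions.

#include <cstdint>
#include <iostream>
#include <variant>
#include <vector>

struct Stub {
  const char* name;
  uintptr_t monomorphic_entry;  // address a call site should jump to
};

// A pool slot holds either a tagged object (here: a Stub*) or an immediate.
using PoolEntry = std::variant<const Stub*, uintptr_t>;

int main() {
  Stub switchable_call_miss{"SwitchableCallMiss", 0x1000};
  Stub megamorphic_call{"MegamorphicCall", 0x2000};

  // Pool as emitted at AOT compile time: code slots reference the stubs.
  std::vector<PoolEntry> pool = {PoolEntry{&switchable_call_miss},
                                 PoolEntry{&megamorphic_call}};

  // Snapshot-load-time rewrite: stub object -> immediate entry point,
  // mirroring the two branches added in ReadIsolateSnapshot above.
  for (auto& entry : pool) {
    if (auto* stub = std::get_if<const Stub*>(&entry)) {
      if (*stub == &switchable_call_miss || *stub == &megamorphic_call) {
        entry = PoolEntry{(*stub)->monomorphic_entry};
      }
    }
  }

  for (const auto& entry : pool) {
    std::cout << std::hex << std::get<uintptr_t>(entry) << "\n";  // 1000, 2000
  }
}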
44 changes: 25 additions & 19 deletions runtime/vm/compiler/assembler/assembler_arm.cc
@@ -1592,7 +1592,6 @@ void Assembler::LoadIsolate(Register rd) {

bool Assembler::CanLoadFromObjectPool(const Object& object) const {
ASSERT(IsOriginalObject(object));
- ASSERT(!target::CanLoadFromThread(object));
if (!constant_pool_allowed()) {
return false;
}
@@ -1608,24 +1607,31 @@ void Assembler::LoadObjectHelper(Register rd,
bool is_unique,
Register pp) {
ASSERT(IsOriginalObject(object));
- intptr_t offset = 0;
- if (target::CanLoadFromThread(object, &offset)) {
- // Load common VM constants from the thread. This works also in places where
- // no constant pool is set up (e.g. intrinsic code).
- ldr(rd, Address(THR, offset), cond);
- } else if (target::IsSmi(object)) {
- // Relocation doesn't apply to Smis.
- LoadImmediate(rd, target::ToRawSmi(object), cond);
- } else if (CanLoadFromObjectPool(object)) {
- // Make sure that class CallPattern is able to decode this load from the
- // object pool.
- const auto index = is_unique ? object_pool_builder().AddObject(object)
- : object_pool_builder().FindObject(object);
- const int32_t offset = target::ObjectPool::element_offset(index);
- LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, pp, cond);
- } else {
+ // `is_unique == true` effectively means object has to be patchable.
+ if (!is_unique) {
+ intptr_t offset = 0;
+ if (target::CanLoadFromThread(object, &offset)) {
+ // Load common VM constants from the thread. This works also in places
+ // where no constant pool is set up (e.g. intrinsic code).
+ ldr(rd, Address(THR, offset), cond);
+ return;
+ }
+ if (target::IsSmi(object)) {
+ // Relocation doesn't apply to Smis.
+ LoadImmediate(rd, target::ToRawSmi(object), cond);
+ return;
+ }
+ }
+ if (!CanLoadFromObjectPool(object)) {
UNREACHABLE();
+ return;
}
+ // Make sure that class CallPattern is able to decode this load from the
+ // object pool.
+ const auto index = is_unique ? object_pool_builder().AddObject(object)
+ : object_pool_builder().FindObject(object);
+ const int32_t offset = target::ObjectPool::element_offset(index);
+ LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, pp, cond);
}

void Assembler::LoadObject(Register rd, const Object& object, Condition cond) {
@@ -3459,7 +3465,7 @@ void Assembler::MonomorphicCheckedEntryJIT() {
LoadClassIdMayBeSmi(IP, R0);
add(R2, R2, Operand(target::ToRawSmi(1)));
cmp(R1, Operand(IP, LSL, 1));
- Branch(Address(THR, target::Thread::monomorphic_miss_entry_offset()), NE);
+ Branch(Address(THR, target::Thread::switchable_call_miss_entry_offset()), NE);
str(R2, FieldAddress(R9, count_offset));
LoadImmediate(R4, 0); // GC-safe for OptimizeInvokedFunction.

@@ -3488,7 +3494,7 @@ void Assembler::MonomorphicCheckedEntryAOT() {

LoadClassId(IP, R0);
cmp(R9, Operand(IP, LSL, 1));
- Branch(Address(THR, target::Thread::monomorphic_miss_entry_offset()), NE);
+ Branch(Address(THR, target::Thread::switchable_call_miss_entry_offset()), NE);

// Fall through to unchecked entry.
ASSERT_EQUAL(CodeSize() - start,
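The LoadObjectHelper restructuring above (mirrored for ARM64 and x64 below) encodes one rule: is_unique means the load must be patchable, so the thread-cache and Smi-immediate shortcuts may only be taken for non-unique objects, and a unique object must always come from the object pool. A small sketch of that decision logic under stated assumptions; ChooseLoad and LoadKind are illustrative names, not VM API.

#include <cassert>
#include <iostream>

enum class LoadKind { kThread, kImmediate, kPool };

// Patchable (unique) loads must come from the pool: a value read from the
// thread or baked in as an immediate cannot be repointed per call site.
LoadKind ChooseLoad(bool is_unique, bool in_thread_cache, bool is_smi,
                    bool pool_allowed) {
  if (!is_unique) {
    if (in_thread_cache) return LoadKind::kThread;
    if (is_smi) return LoadKind::kImmediate;
  }
  assert(pool_allowed);  // plays the role of UNREACHABLE() in the real code
  return LoadKind::kPool;
}

int main() {
  // A thread-cached constant normally loads straight from THR...
  assert(ChooseLoad(false, true, false, true) == LoadKind::kThread);
  // ...but the same object loaded uniquely must go through the pool.
  assert(ChooseLoad(true, true, false, true) == LoadKind::kPool);
  std::cout << "ok\n";
}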
33 changes: 20 additions & 13 deletions runtime/vm/compiler/assembler/assembler_arm64.cc
@@ -430,7 +430,6 @@ intptr_t Assembler::FindImmediate(int64_t imm) {

bool Assembler::CanLoadFromObjectPool(const Object& object) const {
ASSERT(IsOriginalObject(object));
- ASSERT(!target::CanLoadFromThread(object));
if (!constant_pool_allowed()) {
return false;
}
@@ -464,20 +463,28 @@ void Assembler::LoadObjectHelper(Register dst,
const Object& object,
bool is_unique) {
ASSERT(IsOriginalObject(object));
- word offset = 0;
- if (IsSameObject(compiler::NullObject(), object)) {
- mov(dst, NULL_REG);
- } else if (target::CanLoadFromThread(object, &offset)) {
- ldr(dst, Address(THR, offset));
- } else if (CanLoadFromObjectPool(object)) {
+ // `is_unique == true` effectively means object has to be patchable.
+ // (even if the object is null)
+ if (!is_unique) {
+ if (IsSameObject(compiler::NullObject(), object)) {
+ mov(dst, NULL_REG);
+ return;
+ }
+ word offset = 0;
+ if (target::CanLoadFromThread(object, &offset)) {
+ ldr(dst, Address(THR, offset));
+ return;
+ }
+ }
+ if (CanLoadFromObjectPool(object)) {
const int32_t offset = target::ObjectPool::element_offset(
is_unique ? object_pool_builder().AddObject(object)
: object_pool_builder().FindObject(object));
LoadWordFromPoolOffset(dst, offset);
- } else {
- ASSERT(target::IsSmi(object));
- LoadImmediate(dst, target::ToRawSmi(object));
+ return;
}
+ ASSERT(target::IsSmi(object));
+ LoadImmediate(dst, target::ToRawSmi(object));
}

void Assembler::LoadObject(Register dst, const Object& object) {
@@ -1549,7 +1556,7 @@ void Assembler::MonomorphicCheckedEntryJIT() {

Label immediate, miss;
Bind(&miss);
- ldr(IP0, Address(THR, target::Thread::monomorphic_miss_entry_offset()));
+ ldr(IP0, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
br(IP0);

Comment("MonomorphicCheckedEntry");
Expand All @@ -1567,7 +1574,7 @@ void Assembler::MonomorphicCheckedEntryJIT() {
cmp(R1, Operand(IP0, LSL, 1));
b(&miss, NE);
str(R2, FieldAddress(R5, count_offset));
- LoadImmediate(R4, 0); // GC-safe for OptimizeInvokedFunction.
+ LoadImmediate(R4, 0); // GC-safe for OptimizeInvokedFunction

// Fall through to unchecked entry.
ASSERT_EQUAL(CodeSize() - start,
@@ -1587,7 +1594,7 @@ void Assembler::MonomorphicCheckedEntryAOT() {

Label immediate, miss;
Bind(&miss);
- ldr(IP0, Address(THR, target::Thread::monomorphic_miss_entry_offset()));
+ ldr(IP0, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
br(IP0);

Comment("MonomorphicCheckedEntry");
2 changes: 1 addition & 1 deletion runtime/vm/compiler/assembler/assembler_ia32.cc
@@ -2123,7 +2123,7 @@ void Assembler::MonomorphicCheckedEntryJIT() {
intptr_t start = CodeSize();
Label have_cid, miss;
Bind(&miss);
- jmp(Address(THR, target::Thread::monomorphic_miss_entry_offset()));
+ jmp(Address(THR, target::Thread::switchable_call_miss_entry_offset()));

Comment("MonomorphicCheckedEntry");
ASSERT(CodeSize() - start ==
32 changes: 19 additions & 13 deletions runtime/vm/compiler/assembler/assembler_x64.cc
@@ -1208,7 +1208,6 @@ void Assembler::Drop(intptr_t stack_elements, Register tmp) {

bool Assembler::CanLoadFromObjectPool(const Object& object) const {
ASSERT(IsOriginalObject(object));
- ASSERT(!target::CanLoadFromThread(object));
if (!constant_pool_allowed()) {
return false;
}
@@ -1243,18 +1242,23 @@ void Assembler::LoadObjectHelper(Register dst,
bool is_unique) {
ASSERT(IsOriginalObject(object));

- intptr_t offset_from_thread;
- if (target::CanLoadFromThread(object, &offset_from_thread)) {
- movq(dst, Address(THR, offset_from_thread));
- } else if (CanLoadFromObjectPool(object)) {
- const intptr_t idx = is_unique ? object_pool_builder().AddObject(object)
- : object_pool_builder().FindObject(object);
- const int32_t offset = target::ObjectPool::element_offset(idx);
+ // `is_unique == true` effectively means object has to be patchable.
+ if (!is_unique) {
+ intptr_t offset;
+ if (target::CanLoadFromThread(object, &offset)) {
+ movq(dst, Address(THR, offset));
+ return;
+ }
+ }
+ if (CanLoadFromObjectPool(object)) {
+ const int32_t offset = target::ObjectPool::element_offset(
+ is_unique ? object_pool_builder().AddObject(object)
+ : object_pool_builder().FindObject(object));
LoadWordFromPoolOffset(dst, offset - kHeapObjectTag);
- } else {
- ASSERT(target::IsSmi(object));
- LoadImmediate(dst, Immediate(target::ToRawSmi(object)));
+ return;
}
+ ASSERT(target::IsSmi(object));
+ LoadImmediate(dst, Immediate(target::ToRawSmi(object)));
}

void Assembler::LoadObject(Register dst, const Object& object) {
@@ -1801,7 +1805,7 @@ void Assembler::MonomorphicCheckedEntryJIT() {
intptr_t start = CodeSize();
Label have_cid, miss;
Bind(&miss);
- jmp(Address(THR, target::Thread::monomorphic_miss_entry_offset()));
+ jmp(Address(THR, target::Thread::switchable_call_miss_entry_offset()));

// Ensure the monomorphic entry is 2-byte aligned (so GC can see them if we
// store them in ICData / MegamorphicCache arrays)
@@ -1829,12 +1833,14 @@ void Assembler::MonomorphicCheckedEntryJIT() {
ASSERT(((CodeSize() - start) & kSmiTagMask) == kSmiTag);
}

+ // RBX - input: class id smi
+ // RDX - input: receiver object
void Assembler::MonomorphicCheckedEntryAOT() {
has_monomorphic_entry_ = true;
intptr_t start = CodeSize();
Label have_cid, miss;
Bind(&miss);
- jmp(Address(THR, target::Thread::monomorphic_miss_entry_offset()));
+ jmp(Address(THR, target::Thread::switchable_call_miss_entry_offset()));

// Ensure the monomorphic entry is 2-byte aligned (so GC can see them if we
// store them in ICData / MegamorphicCache arrays)
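Across all four assemblers the monomorphic entries now branch through a single thread slot, switchable_call_miss_entry_offset, rather than a dedicated monomorphic-miss entry; that slot is the "single runtime entry" of the commit title. A hedged C++ model of the dispatch those entries implement follows; Thread, Receiver, and MonomorphicEntry are illustrative stand-ins, not VM types.

#include <cstdint>
#include <iostream>

using Handler = void (*)(const char* site);

void SwitchableCallMiss(const char* site) {
  // The single runtime entry: would re-resolve and patch the call site.
  std::cout << site << ": miss -> runtime re-resolution\n";
}

struct Thread {
  // One slot now serves every switchable-call miss.
  Handler switchable_call_miss_entry = &SwitchableCallMiss;
};

struct Receiver {
  uint32_t cid;  // class id
};

// Roughly MonomorphicCheckedEntryAOT: compare the cached class id with the
// receiver's; fall through on a match, jump to the miss entry otherwise.
void MonomorphicEntry(Thread* thr, Receiver* r, uint32_t expected_cid) {
  if (r->cid != expected_cid) {
    thr->switchable_call_miss_entry("call site");  // jmp(Address(THR, ...))
    return;
  }
  std::cout << "fast path: monomorphic target runs\n";
}

int main() {
  Thread thr;
  Receiver a{42}, b{7};
  MonomorphicEntry(&thr, &a, 42);  // hit: falls through
  MonomorphicEntry(&thr, &b, 42);  // miss: goes to SwitchableCallMiss
}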
32 changes: 24 additions & 8 deletions runtime/vm/compiler/backend/flow_graph_compiler_arm.cc
@@ -1132,13 +1132,29 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
// Load receiver into R0.
__ LoadFromOffset(kWord, R0, SP,
(args_desc.Count() - 1) * compiler::target::kWordSize);
- __ LoadObject(R9, cache);
- __ ldr(
- LR,
- compiler::Address(
- THR,
- compiler::target::Thread::megamorphic_call_checked_entry_offset()));
- __ blx(LR);
+ // Use same code pattern as instance call so it can be parsed by code patcher.
+ if (FLAG_precompiled_mode) {
+ if (FLAG_use_bare_instructions) {
+ // The AOT runtime will replace the slot in the object pool with the
+ // entrypoint address - see clustered_snapshot.cc.
+ __ LoadUniqueObject(LR, StubCode::MegamorphicCall());
+ } else {
+ __ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
+ __ ldr(LR, compiler::FieldAddress(
+ CODE_REG, compiler::target::Code::entry_point_offset(
+ Code::EntryKind::kMonomorphic)));
+ }
+ __ LoadUniqueObject(R9, cache);
+ __ blx(LR);
+
+ } else {
+ __ LoadUniqueObject(R9, cache);
+ __ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
+ __ ldr(LR, compiler::FieldAddress(
+ CODE_REG,
+ Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
+ __ blx(LR);
+ }

RecordSafepoint(locs, slow_path_argument_count);
const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
@@ -1173,7 +1189,7 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(ic_data.NumArgsTested() == 1);
- const Code& initial_stub = StubCode::UnlinkedCall();
+ const Code& initial_stub = StubCode::SwitchableCallMiss();
const char* switchable_call_mode = "smiable";
if (!receiver_can_be_smi) {
switchable_call_mode = "non-smi";
27 changes: 23 additions & 4 deletions runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc
@@ -1089,9 +1089,25 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
// Load receiver into R0.
__ LoadFromOffset(R0, SP, (args_desc.Count() - 1) * kWordSize);

- __ LoadObject(R5, cache);
- __ ldr(LR, compiler::Address(
- THR, Thread::megamorphic_call_checked_entry_offset()));
+ // Use same code pattern as instance call so it can be parsed by code patcher.
+ compiler::ObjectPoolBuilder& op = __ object_pool_builder();
+ const intptr_t data_index =
+ op.AddObject(cache, ObjectPool::Patchability::kPatchable);
+ const intptr_t stub_index = op.AddObject(
+ StubCode::MegamorphicCall(), ObjectPool::Patchability::kPatchable);
+ ASSERT((data_index + 1) == stub_index);
+ if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
+ // The AOT runtime will replace the slot in the object pool with the
+ // entrypoint address - see clustered_snapshot.cc.
+ __ LoadDoubleWordFromPoolOffset(R5, LR,
+ ObjectPool::element_offset(data_index));
+ } else {
+ __ LoadDoubleWordFromPoolOffset(R5, CODE_REG,
+ ObjectPool::element_offset(data_index));
+ __ ldr(LR, compiler::FieldAddress(
+ CODE_REG,
+ Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
+ }
__ blr(LR);

RecordSafepoint(locs, slow_path_argument_count);
@@ -1125,7 +1141,7 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
bool receiver_can_be_smi) {
ASSERT(CanCallDart());
ASSERT(ic_data.NumArgsTested() == 1);
- const Code& initial_stub = StubCode::UnlinkedCall();
+ const Code& initial_stub = StubCode::SwitchableCallMiss();
const char* switchable_call_mode = "smiable";
if (!receiver_can_be_smi) {
switchable_call_mode = "non-smi";
@@ -1137,6 +1153,9 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
compiler::ObjectPoolBuilder& op = __ object_pool_builder();

__ Comment("InstanceCallAOT (%s)", switchable_call_mode);
+ // Clear argument descriptor to keep gc happy when it gets pushed on to
+ // the stack.
+ __ LoadImmediate(R4, 0);
__ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);

const intptr_t data_index =
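The ASSERT((data_index + 1) == stub_index) above is what allows LoadDoubleWordFromPoolOffset to fetch the cache and the call target together: the two values must occupy adjacent object-pool slots so a single paired load can fill both registers. A toy model of that layout constraint, under the assumption that the double-word load behaves like one paired memory access; AddObject and LoadPair here are illustrative, not VM API.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Toy object pool: word-sized slots, appended in emission order.
std::vector<uintptr_t> pool;

intptr_t AddObject(uintptr_t value) {
  pool.push_back(value);
  return static_cast<intptr_t>(pool.size()) - 1;
}

// Models a paired (ldp-style) load: one access yields the data register
// and the code/entry register at once.
std::pair<uintptr_t, uintptr_t> LoadPair(intptr_t index) {
  return {pool[index], pool[index + 1]};
}

int main() {
  const uintptr_t cache = 0xCAFE;  // stands in for the MegamorphicCache
  const uintptr_t stub = 0xBEEF;   // stands in for StubCode::MegamorphicCall()
  const intptr_t data_index = AddObject(cache);
  const intptr_t stub_index = AddObject(stub);
  assert(data_index + 1 == stub_index);  // adjacency the emitter relies on

  auto [r5, target] = LoadPair(data_index);  // R5 = cache, target = stub/entry
  std::cout << std::hex << r5 << " " << target << "\n";
}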
7 changes: 4 additions & 3 deletions runtime/vm/compiler/backend/flow_graph_compiler_ia32.cc
@@ -982,9 +982,10 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
__ Comment("MegamorphicCall");
// Load receiver into EBX.
__ movl(EBX, compiler::Address(ESP, (args_desc.Count() - 1) * kWordSize));
- __ LoadObject(ECX, cache);
- __ call(
- compiler::Address(THR, Thread::megamorphic_call_checked_entry_offset()));
+ __ LoadObject(ECX, cache, true);
+ __ LoadObject(CODE_REG, StubCode::MegamorphicCall(), true);
+ __ call(compiler::FieldAddress(
+ CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));

AddCurrentDescriptor(RawPcDescriptors::kOther, DeoptId::kNone, token_pos);
RecordSafepoint(locs, slow_path_argument_count);
