From 55082fe923545a08a3f6e120c63dfe39ab9374be Mon Sep 17 00:00:00 2001
From: Dave Estes
Date: Mon, 26 Sep 2016 15:34:17 -0700
Subject: [PATCH] Correctly calculate stub_size().

Summary:
Additionally refactor extract_spoff() using a lighter-weight instruction
decoding approach in keeping with the x86_64 and PPC64 implementations.

Closes https://github.com/facebook/hhvm/pull/7373

Differential Revision: D3896900

Pulled By: aorenste

fbshipit-source-id: 189afc1ea91bf54db2e738906d80d4d037fc8438
---
 hphp/runtime/vm/jit/service-requests.cpp | 86 +++++++++++++-----------
 hphp/vixl/a64/instructions-a64.h         |  5 ++
 2 files changed, 51 insertions(+), 40 deletions(-)

diff --git a/hphp/runtime/vm/jit/service-requests.cpp b/hphp/runtime/vm/jit/service-requests.cpp
index a72555447f64c..d5ce9b086c533 100644
--- a/hphp/runtime/vm/jit/service-requests.cpp
+++ b/hphp/runtime/vm/jit/service-requests.cpp
@@ -32,6 +32,7 @@
 #include "hphp/util/trace.h"
 
 #include "hphp/vixl/a64/macro-assembler-a64.h"
+#include "hphp/vixl/a64/disasm-a64.h"
 
 #include "hphp/ppc64-asm/decoded-instr-ppc64.h"
 
@@ -207,9 +208,18 @@ namespace x64 {
 }
 
 namespace arm {
-  // 'lea' results in atmost 4 instructions (see vasm-arm.cpp)
-  static constexpr int kMovLen = 4 * 4;
-  static constexpr int kLeaVmSpLen = 4 * 4;
+  // vasm lea is emitted in 4 bytes.
+  //   ADD imm
+  static constexpr int kLeaVmSpLen = 4;
+  // The largest of vasm setcc, copy, or leap is emitted in 8 bytes.
+  //   AND imm, MOV, or ADRP + ADD imm
+  static constexpr int kMovLen = 8;
+  // The largest of vasm copy or leap is emitted in 8 bytes.
+  //   MOV or ADRP + ADD imm
+  static constexpr int kPersist = 8;
+  // vasm copy and jmpi is emitted in 20 bytes.
+  //   MOV and 16
+  static constexpr int kSvcReqExit = 20;
 }
 
 namespace ppc64 {
@@ -227,7 +237,9 @@ size_t stub_size() {
     case Arch::X64:
       return kTotalArgs * x64::kMovLen + x64::kLeaVmSpLen;
     case Arch::ARM:
-      return kTotalArgs * arm::kMovLen + arm::kLeaVmSpLen;
+      return arm::kLeaVmSpLen +
+             kTotalArgs * arm::kMovLen +
+             arm::kPersist + arm::kSvcReqExit;
     case Arch::PPC64:
       // This calculus was based on the amount of emitted instructions in
       // emit_svcreq.
@@ -254,42 +266,36 @@ FPInvOffset extract_spoff(TCA stub) {
     }
 
     case Arch::ARM: {
-      struct Decoder : public vixl::Decoder {
-        void VisitAddSubImmediate(vixl::Instruction* inst) {
-          // For immediate operands, shift can be '0' or '12'
-          int64_t immed =
-            inst->ImmAddSub() << ((inst->ShiftAddSub() == 1) ? 12 : 0);
-          switch (inst->Mask(vixl::AddSubOpMask)) {
-            case vixl::ADD: offset = immed; break;
-            case vixl::SUB: offset = -immed; break;
-            default: break;
-          }
-        }
-        void VisitMoveWideImmediate(vixl::Instruction* inst) {
-          // For wide moves, shift can be 0, 16, 32 or 64
-          int64_t immed = safe_cast<int64_t>(
-            inst->ImmMoveWide() << (inst->ShiftMoveWide() << 4));
-          switch (inst->Mask(vixl::MoveWideImmediateMask)) {
-            case vixl::MOVN_w:
-            case vixl::MOVN_x:
-              immed = safe_cast<int32_t>(~immed);
-              break;
-          }
-          offset = immed;
-        }
-        folly::Optional<int32_t> offset;
-      };
-      Decoder decoder;
-      decoder.Decode((vixl::Instruction*)(stub));
-
-      // 'lea' becomes
-      //   a. 'add dst, base, #imm' or
-      //   b. 'mov r, #imm'
-      //      'add dst, base, r'
-      // FIXME: Return '0' if vasm optimizes 'lea' to 'mov'
-      if (!decoder.offset) return FPInvOffset{0};
-      always_assert(decoder.offset && (*decoder.offset % sizeof(Cell)) == 0);
-      return FPInvOffset{-(*decoder.offset / int32_t{sizeof(Cell)})};
+      auto instr = reinterpret_cast<vixl::Instruction*>(stub);
+
+      if (instr->IsAddSubImmediate()) {
+        auto const offBytes = safe_cast<int32_t>(instr->ImmAddSub());
+        always_assert((offBytes % sizeof(Cell)) == 0);
+
+        if (instr->Mask(vixl::AddSubImmediateMask) == vixl::SUB_w_imm ||
+            instr->Mask(vixl::AddSubImmediateMask) == vixl::SUB_x_imm) {
+          return FPInvOffset{offBytes / int32_t{sizeof(Cell)}};
+        } else if (instr->Mask(vixl::AddSubImmediateMask) == vixl::ADD_w_imm ||
+                   instr->Mask(vixl::AddSubImmediateMask) == vixl::ADD_x_imm) {
+          return FPInvOffset{-(offBytes / int32_t{sizeof(Cell)})};
+        }
+      } else if (instr->IsMovn()) {
+        auto next = instr->NextInstruction();
+        always_assert(next->Mask(vixl::AddSubShiftedMask) == vixl::ADD_w_shift ||
+                      next->Mask(vixl::AddSubShiftedMask) == vixl::ADD_x_shift);
+        auto const offBytes = safe_cast<int32_t>(~instr->ImmMoveWide());
+        always_assert((offBytes % sizeof(Cell)) == 0);
+        return FPInvOffset{-(offBytes / int32_t{sizeof(Cell)})};
+      } else if (instr->IsMovz()) {
+        auto next = instr->NextInstruction();
+        always_assert(next->Mask(vixl::AddSubShiftedMask) == vixl::SUB_w_shift ||
+                      next->Mask(vixl::AddSubShiftedMask) == vixl::SUB_x_shift);
+        auto const offBytes = safe_cast<int32_t>(instr->ImmMoveWide());
+        always_assert((offBytes % sizeof(Cell)) == 0);
+        return FPInvOffset{offBytes / int32_t{sizeof(Cell)}};
+      } else {
+        always_assert(false && "Expected an instruction that offsets SP");
+      }
     }
 
     case Arch::PPC64: {
diff --git a/hphp/vixl/a64/instructions-a64.h b/hphp/vixl/a64/instructions-a64.h
index f4650ffcfa336..35742a3f68625 100644
--- a/hphp/vixl/a64/instructions-a64.h
+++ b/hphp/vixl/a64/instructions-a64.h
@@ -234,6 +234,11 @@ class Instruction {
            (Mask(MoveWideImmediateMask) == MOVN_w);
   }
 
+  inline bool IsMovz() const {
+    return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
+           (Mask(MoveWideImmediateMask) == MOVZ_w);
+  }
+
   // Indicate whether Rd can be the stack pointer or the zero register. This
   // does not check that the instruction actually has an Rd field.
   inline Reg31Mode RdMode() const {
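
Editor's note (not part of the patch): to make the new ARM stub_size()
arithmetic concrete, below is a minimal standalone C++ sketch of how the
per-vasm constants combine. kTotalArgs is defined elsewhere in
service-requests.cpp; the value 4 used here is an assumption chosen purely
for illustration, as is the name stubBytes.

#include <cstdio>

namespace arm {
  // Per-piece code sizes from the patch above, in bytes.
  constexpr int kLeaVmSpLen = 4;   // vasm lea: ADD imm
  constexpr int kMovLen     = 8;   // largest of setcc, copy, or leap
  constexpr int kPersist    = 8;   // largest of copy or leap
  constexpr int kSvcReqExit = 20;  // copy plus jmpi
}

int main() {
  constexpr int kTotalArgs = 4;  // assumption, for illustration only
  constexpr int stubBytes = arm::kLeaVmSpLen +
                            kTotalArgs * arm::kMovLen +
                            arm::kPersist + arm::kSvcReqExit;
  std::printf("ARM service-request stub: %d bytes\n", stubBytes);  // 64
  return 0;
}

The old code sized the stub as kTotalArgs * 16 + 16 bytes; the new
constants both tighten the per-argument estimate and account for the
persist and exit sequences that the stub emitter also produces, which the
old calculation omitted.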