Skip to content
This repository has been archived by the owner on Apr 23, 2020. It is now read-only.

Commit

Permalink
HHVM calling conventions.
Browse files Browse the repository at this point in the history
The HHVM calling convention, hhvmcc, is used by the HHVM JIT for
functions in the translation cache. We currently support the LLVM back end
to generate code for X86-64 and may support other architectures in the
future.

In the HHVM calling convention, any GP register may be used to pass and
return values, with the exception of R12, which is reserved for the
thread-local area and is callee-saved. Other than R12, we always
pass RBX and RBP as arguments, which are our virtual machine's stack
pointer and frame pointer respectively.

When we enter the translation cache via an hhvmcc function, we expect
the stack to be aligned at 16 bytes, i.e. skewed by 8 bytes relative to
the standard ABI alignment. This affects stack object alignment and stack
adjustments for function calls.

One extra calling convention, hhvm_ccc, is used to call C++ helpers from
HHVM's translation cache. It is almost identical to the standard C calling
convention, with the exception of the first argument, which is passed in
RBP (before we use RDI, RSI, etc.).

Differential Revision: http://reviews.llvm.org/D12681

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@248832 91177308-0d34-0410-b5e6-96231b3b80d8
  • Loading branch information
maksfb committed Sep 29, 2015
1 parent 5518a74 commit 3b3752c
Show file tree
Hide file tree
Showing 16 changed files with 369 additions and 29 deletions.
11 changes: 10 additions & 1 deletion include/llvm/IR/CallingConv.h
Expand Up @@ -147,7 +147,16 @@ namespace CallingConv {

/// \brief MSVC calling convention that passes vectors and vector aggregates
/// in SSE registers.
X86_VectorCall = 80
X86_VectorCall = 80,

/// \brief Calling convention used by HipHop Virtual Machine (HHVM) to
/// perform calls to and from translation cache, and for calling PHP
/// functions.
/// HHVM calling convention supports tail/sibling call elimination.
HHVM = 81,

/// \brief HHVM calling convention for invoking C/C++ helpers.
HHVM_C = 82
};
} // End CallingConv namespace

Expand Down
16 changes: 14 additions & 2 deletions include/llvm/Support/MathExtras.h
Expand Up @@ -599,15 +599,27 @@ inline uint64_t PowerOf2Floor(uint64_t A) {
/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
///
/// If non-zero \p Skew is specified, the return value will be a minimal
/// integer that is greater than or equal to \p Value and equal to
/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
/// \p Align, its value is adjusted to '\p Skew mod \p Align'.
///
/// Examples:
/// \code
/// RoundUpToAlignment(5, 8) = 8
/// RoundUpToAlignment(17, 8) = 24
/// RoundUpToAlignment(~0LL, 8) = 0
/// RoundUpToAlignment(321, 255) = 510
///
/// RoundUpToAlignment(5, 8, 7) = 7
/// RoundUpToAlignment(17, 8, 1) = 17
/// RoundUpToAlignment(~0LL, 8, 3) = 3
/// RoundUpToAlignment(321, 255, 42) = 552
/// \endcode
inline uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align,
                                   uint64_t Skew = 0) {
  // Reduce the skew into [0, Align) so the arithmetic below is well-defined.
  Skew %= Align;
  // Translate into the skew-free frame, round up to a multiple of Align
  // there, then translate back. All arithmetic intentionally wraps mod 2**64.
  uint64_t Shifted = Value - Skew;
  return Shifted + (Align - Shifted % Align) % Align + Skew;
}

/// Returns the offset to the next integer (mod 2**64) that is greater than
Expand Down
5 changes: 5 additions & 0 deletions include/llvm/Target/TargetFrameLowering.h
Expand Up @@ -96,6 +96,11 @@ class TargetFrameLowering {
return StackRealignable;
}

/// Return the skew that has to be applied to stack alignment under
/// certain conditions (e.g. stack was adjusted before function \p MF
/// was called).
virtual unsigned getStackAlignmentSkew(const MachineFunction &MF) const;

/// getOffsetOfLocalArea - This method returns the offset of the local area
/// from the stack pointer on entrance to a function.
///
Expand Down
2 changes: 2 additions & 0 deletions lib/AsmParser/LLLexer.cpp
Expand Up @@ -587,6 +587,8 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(preserve_mostcc);
KEYWORD(preserve_allcc);
KEYWORD(ghccc);
KEYWORD(hhvmcc);
KEYWORD(hhvm_ccc);

KEYWORD(cc);
KEYWORD(c);
Expand Down
4 changes: 4 additions & 0 deletions lib/AsmParser/LLParser.cpp
Expand Up @@ -1532,6 +1532,8 @@ bool LLParser::ParseOptionalDLLStorageClass(unsigned &Res) {
/// ::= 'preserve_mostcc'
/// ::= 'preserve_allcc'
/// ::= 'ghccc'
/// ::= 'hhvmcc'
/// ::= 'hhvm_ccc'
/// ::= 'cc' UINT
///
bool LLParser::ParseOptionalCallingConv(unsigned &CC) {
Expand Down Expand Up @@ -1560,6 +1562,8 @@ bool LLParser::ParseOptionalCallingConv(unsigned &CC) {
case lltok::kw_preserve_mostcc:CC = CallingConv::PreserveMost; break;
case lltok::kw_preserve_allcc: CC = CallingConv::PreserveAll; break;
case lltok::kw_ghccc: CC = CallingConv::GHC; break;
case lltok::kw_hhvmcc: CC = CallingConv::HHVM; break;
case lltok::kw_hhvm_ccc: CC = CallingConv::HHVM_C; break;
case lltok::kw_cc: {
Lex.Lex();
return ParseUInt32(CC);
Expand Down
1 change: 1 addition & 0 deletions lib/AsmParser/LLToken.h
Expand Up @@ -97,6 +97,7 @@ namespace lltok {
kw_webkit_jscc, kw_anyregcc,
kw_preserve_mostcc, kw_preserve_allcc,
kw_ghccc,
kw_hhvmcc, kw_hhvm_ccc,

// Attributes:
kw_attributes,
Expand Down
33 changes: 18 additions & 15 deletions lib/CodeGen/PrologEpilogInserter.cpp
Expand Up @@ -500,7 +500,7 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
static inline void
AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx,
bool StackGrowsDown, int64_t &Offset,
unsigned &MaxAlign) {
unsigned &MaxAlign, unsigned Skew) {
// If the stack grows down, add the object size to find the lowest address.
if (StackGrowsDown)
Offset += MFI->getObjectSize(FrameIdx);
Expand All @@ -512,7 +512,7 @@ AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx,
MaxAlign = std::max(MaxAlign, Align);

// Adjust to alignment boundary.
Offset = RoundUpToAlignment(Offset, Align);
Offset = RoundUpToAlignment(Offset, Align, Skew);

if (StackGrowsDown) {
DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
Expand All @@ -530,12 +530,12 @@ static void
AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
SmallSet<int, 16> &ProtectedObjs,
MachineFrameInfo *MFI, bool StackGrowsDown,
int64_t &Offset, unsigned &MaxAlign) {
int64_t &Offset, unsigned &MaxAlign, unsigned Skew) {

for (StackObjSet::const_iterator I = UnassignedObjs.begin(),
E = UnassignedObjs.end(); I != E; ++I) {
int i = *I;
AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
ProtectedObjs.insert(i);
}
}
Expand Down Expand Up @@ -563,6 +563,9 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
&& "Local area offset should be in direction of stack growth");
int64_t Offset = LocalAreaOffset;

// Skew to be applied to alignment.
unsigned Skew = TFI.getStackAlignmentSkew(Fn);

// If there are fixed sized objects that are preallocated in the local area,
// non-fixed objects can't be allocated right at the start of local area.
// We currently don't support filling in holes in between fixed sized
Expand Down Expand Up @@ -593,7 +596,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {

unsigned Align = MFI->getObjectAlignment(i);
// Adjust to alignment boundary
Offset = RoundUpToAlignment(Offset, Align);
Offset = RoundUpToAlignment(Offset, Align, Skew);

MFI->setObjectOffset(i, -Offset); // Set the computed offset
}
Expand All @@ -602,7 +605,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
for (int i = MaxCSFI; i >= MinCSFI ; --i) {
unsigned Align = MFI->getObjectAlignment(i);
// Adjust to alignment boundary
Offset = RoundUpToAlignment(Offset, Align);
Offset = RoundUpToAlignment(Offset, Align, Skew);

MFI->setObjectOffset(i, Offset);
Offset += MFI->getObjectSize(i);
Expand All @@ -624,7 +627,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
RS->getScavengingFrameIndices(SFIs);
for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
IE = SFIs.end(); I != IE; ++I)
AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign);
AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign, Skew);
}

// FIXME: Once this is working, then enable flag will change to a target
Expand All @@ -635,7 +638,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
unsigned Align = MFI->getLocalFrameMaxAlign();

// Adjust to alignment boundary.
Offset = RoundUpToAlignment(Offset, Align);
Offset = RoundUpToAlignment(Offset, Align, Skew);

DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

Expand All @@ -662,7 +665,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
StackObjSet AddrOfObjs;

AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), StackGrowsDown,
Offset, MaxAlign);
Offset, MaxAlign, Skew);

// Assign large stack objects first.
for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
Expand Down Expand Up @@ -695,11 +698,11 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
}

AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
Offset, MaxAlign, Skew);
AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
Offset, MaxAlign, Skew);
AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
Offset, MaxAlign, Skew);
}

// Then assign frame offsets to stack objects that are not used to spill
Expand All @@ -719,7 +722,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
if (ProtectedObjs.count(i))
continue;

AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
}

// Make sure the special register scavenging spill slot is closest to the
Expand All @@ -729,7 +732,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
RS->getScavengingFrameIndices(SFIs);
for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
IE = SFIs.end(); I != IE; ++I)
AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign);
AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign, Skew);
}

if (!TFI.targetHandlesStackFrameRounding()) {
Expand All @@ -754,7 +757,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// If the frame pointer is eliminated, all frame offsets will be relative to
// SP not FP. Align to MaxAlign so this works.
StackAlign = std::max(StackAlign, MaxAlign);
Offset = RoundUpToAlignment(Offset, StackAlign);
Offset = RoundUpToAlignment(Offset, StackAlign, Skew);
}

// Update frame info to pretend that this is part of the stack...
Expand Down
11 changes: 11 additions & 0 deletions lib/CodeGen/TargetFrameLoweringImpl.cpp
Expand Up @@ -17,6 +17,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
Expand Down Expand Up @@ -81,3 +82,13 @@ void TargetFrameLowering::determineCalleeSaves(MachineFunction &MF,
SavedRegs.set(Reg);
}
}

unsigned TargetFrameLowering::getStackAlignmentSkew(
    const MachineFunction &MF) const {
  // An HHVM-convention function is entered after the return address has
  // already been removed from the stack, so its incoming stack is offset
  // from the usual ABI alignment by one pointer-sized slot.
  const bool IsHHVM =
      MF.getFunction()->getCallingConv() == CallingConv::HHVM;
  if (LLVM_UNLIKELY(IsHHVM))
    return MF.getTarget().getPointerSize();

  return 0;
}
2 changes: 2 additions & 0 deletions lib/IR/AsmWriter.cpp
Expand Up @@ -319,6 +319,8 @@ static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
case CallingConv::X86_64_Win64: Out << "x86_64_win64cc"; break;
case CallingConv::SPIR_FUNC: Out << "spir_func"; break;
case CallingConv::SPIR_KERNEL: Out << "spir_kernel"; break;
case CallingConv::HHVM: Out << "hhvmcc"; break;
case CallingConv::HHVM_C: Out << "hhvm_ccc"; break;
}
}

Expand Down
35 changes: 35 additions & 0 deletions lib/Target/X86/X86CallingConv.td
Expand Up @@ -202,6 +202,16 @@ def RetCC_X86_64_AnyReg : CallingConv<[
CCCustom<"CC_X86_AnyReg_Error">
]>;

// X86-64 HHVM return-value convention.
def RetCC_X86_64_HHVM: CallingConv<[
  // Promote all small integer types to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Values may be returned in any GP register except RSP and R12
  // (R12 is reserved for HHVM's thread-local area and is callee-saved).
  CCIfType<[i64], CCAssignToReg<[RBX, RBP, RDI, RSI, RDX, RCX, R8, R9,
                                 RAX, R10, R11, R13, R14, R15]>>
]>;

// This is the root return-value convention for the X86-32 backend.
def RetCC_X86_32 : CallingConv<[
// If FastCC, use RetCC_X86_32_Fast.
Expand All @@ -227,6 +237,9 @@ def RetCC_X86_64 : CallingConv<[
CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,

// Handle HHVM calls.
CCIfCC<"CallingConv::HHVM", CCDelegateTo<RetCC_X86_64_HHVM>>,

// Mingw64 and native Win64 use Win64 CC
CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,

Expand Down Expand Up @@ -319,6 +332,23 @@ def CC_X86_64_C : CallingConv<[
CCAssignToStack<64, 64>>
]>;

// Calling convention for X86-64 HHVM.
def CC_X86_64_HHVM : CallingConv<[
  // Arguments may be passed in any GP register except RSP. Note that,
  // unlike the return convention above, R12 is in the argument list.
  CCIfType<[i64], CCAssignToReg<[RBX, R12, RBP, R15,
                                 RDI, RSI, RDX, RCX, R8, R9,
                                 RAX, R10, R11, R13, R14]>>
]>;

// Calling convention used to invoke C/C++ helper functions from HHVM's
// translation cache (hhvm_ccc).
def CC_X86_64_HHVM_C : CallingConv<[
  // The first i64 argument is passed in RBP rather than RDI.
  CCIfType<[i64], CCAssignToReg<[RBP]>>,

  // Every remaining argument follows the regular C calling convention.
  CCDelegateTo<CC_X86_64_C>
]>;

// Calling convention used on Win64
def CC_X86_Win64_C : CallingConv<[
// FIXME: Handle byval stuff.
Expand Down Expand Up @@ -734,6 +764,8 @@ def CC_X86_64 : CallingConv<[
CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<CC_X86_Win64_C>>,
CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,
CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win64_VectorCall>>,
CCIfCC<"CallingConv::HHVM", CCDelegateTo<CC_X86_64_HHVM>>,
CCIfCC<"CallingConv::HHVM_C", CCDelegateTo<CC_X86_64_HHVM_C>>,

// Mingw64 and native Win64 use Win64 CC
CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
Expand Down Expand Up @@ -804,3 +836,6 @@ def CSR_64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add CSR_64,
def CSR_64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RDI, RSI, R14, R15,
(sequence "ZMM%u", 16, 31),
K4, K5, K6, K7)>;

// Callee-saved registers for PHP calls in HHVM: only R12 is preserved,
// since it holds HHVM's thread-local area.
def CSR_64_HHVM : CalleeSavedRegs<(add R12)>;
12 changes: 5 additions & 7 deletions lib/Target/X86/X86ISelLowering.cpp
Expand Up @@ -2426,7 +2426,7 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
/// supports tail call optimization.
static bool IsTailCallConvention(CallingConv::ID CC) {
  // These are the conventions that support tail/sibling call elimination.
  switch (CC) {
  case CallingConv::Fast:
  case CallingConv::GHC:
  case CallingConv::HiPE:
  case CallingConv::HHVM:
    return true;
  default:
    return false;
  }
}

/// \brief Return true if the calling convention is a C calling convention.
Expand Down Expand Up @@ -3900,19 +3900,17 @@ bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
/// Callee pop is necessary to support tail calls.
bool X86::isCalleePop(CallingConv::ID CallingConv,
                      bool is64Bit, bool IsVarArg, bool TailCallOpt) {
  // Tail-call conventions pop their own arguments, but only when tail-call
  // optimization is enabled and the function is not variadic.
  if (IsTailCallConvention(CallingConv))
    return !IsVarArg && TailCallOpt;

  switch (CallingConv) {
  case CallingConv::X86_StdCall:
  case CallingConv::X86_FastCall:
  case CallingConv::X86_ThisCall:
    // The classic 32-bit callee-pop conventions.
    return !is64Bit;
  default:
    // Everything else leaves stack cleanup to the caller.
    return false;
  }
}

Expand Down
4 changes: 4 additions & 0 deletions lib/Target/X86/X86RegisterInfo.cpp
Expand Up @@ -256,6 +256,8 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return CSR_64_Intel_OCL_BI_SaveList;
break;
}
case CallingConv::HHVM:
return CSR_64_HHVM_SaveList;
case CallingConv::Cold:
if (Is64Bit)
return CSR_64_MostRegs_SaveList;
Expand Down Expand Up @@ -316,6 +318,8 @@ X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
return CSR_64_Intel_OCL_BI_RegMask;
break;
}
case CallingConv::HHVM:
return CSR_64_HHVM_RegMask;
case CallingConv::Cold:
if (Is64Bit)
return CSR_64_MostRegs_RegMask;
Expand Down

0 comments on commit 3b3752c

Please sign in to comment.