Unreviewed, reverting 272034@main.
https://bugs.webkit.org/show_bug.cgi?id=266621

SP3 (Speedometer 3) 0.5% regression

Reverted changeset:

"Inline some functions used in copyCompactAndLinkCode"
https://bugs.webkit.org/show_bug.cgi?id=266362
https://commits.webkit.org/272034@main

Canonical link: https://commits.webkit.org/272257@main
webkit-commit-queue authored and Constellation committed Dec 19, 2023
Parent: 4d2a8e2 · Commit: a44e653
Showing 3 changed files with 50 additions and 50 deletions.
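
For context: the change being reverted had annotated the helpers below with WTF's ALWAYS_INLINE macro, which forces inlining in release builds, and this commit removes those annotations again. As a rough sketch of how such a hint is commonly defined (the real macro lives in WTF's Compiler.h and carries more compiler and build-configuration guards; MY_ALWAYS_INLINE is a hypothetical stand-in):

    // Sketch of a forced-inlining hint in the style of WTF's ALWAYS_INLINE.
    #if defined(__GNUC__) || defined(__clang__)
    #define MY_ALWAYS_INLINE inline __attribute__((__always_inline__))
    #elif defined(_MSC_VER)
    #define MY_ALWAYS_INLINE __forceinline
    #else
    #define MY_ALWAYS_INLINE inline
    #endif

    // The attribute inlines the call even where the compiler's own
    // heuristics would decline; the price is larger generated code.
    MY_ALWAYS_INLINE int addOne(int x) { return x + 1; }

Forcing inlining trades code size for call overhead, so a hint like this can regress performance as easily as improve it; the SP3 number above is the reason it is being backed out.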
Source/JavaScriptCore/assembler/ARM64Assembler.h (35 additions, 35 deletions)
@@ -260,7 +260,7 @@ class ARM64Assembler {
{
}

- ALWAYS_INLINE AssemblerBuffer& buffer() { return m_buffer; }
+ AssemblerBuffer& buffer() { return m_buffer; }

// (HS, LO, HI, LS) -> (AE, B, A, BE)
// (VS, VC) -> (O, NO)
@@ -415,8 +415,8 @@ class ARM64Assembler {
data.copyTypes = other.data.copyTypes;
return *this;
}
- ALWAYS_INLINE intptr_t from() const { return data.realTypes.m_from; }
- ALWAYS_INLINE void setFrom(const ARM64Assembler* assembler, intptr_t from)
+ intptr_t from() const { return data.realTypes.m_from; }
+ void setFrom(const ARM64Assembler* assembler, intptr_t from)
{
#if CPU(ARM64E)
data.realTypes.m_to = tagInt(to(assembler), static_cast<PtrTag>(from ^ bitwise_cast<intptr_t>(assembler)));
@@ -425,7 +425,7 @@
#endif
data.realTypes.m_from = from;
}
- ALWAYS_INLINE intptr_t to(const ARM64Assembler* assembler) const
+ intptr_t to(const ARM64Assembler* assembler) const
{
#if CPU(ARM64E)
return untagInt(data.realTypes.m_to, static_cast<PtrTag>(data.realTypes.m_from ^ bitwise_cast<intptr_t>(assembler)));
@@ -434,15 +434,15 @@
return data.realTypes.m_to;
#endif
}
- ALWAYS_INLINE JumpType type() const { return data.realTypes.m_type; }
- ALWAYS_INLINE JumpLinkType linkType() const { return data.realTypes.m_linkType; }
- ALWAYS_INLINE BranchType branchType() const { return data.realTypes.m_branchType; }
- ALWAYS_INLINE void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
- ALWAYS_INLINE Condition condition() const { return data.realTypes.m_condition; }
- ALWAYS_INLINE bool is64Bit() const { return data.realTypes.m_is64Bit; }
- ALWAYS_INLINE bool isThunk() const { return data.realTypes.m_isThunk == ThunkOrNot::Thunk; }
- ALWAYS_INLINE unsigned bitNumber() const { return data.realTypes.m_bitNumber; }
- ALWAYS_INLINE RegisterID compareRegister() const { return data.realTypes.m_compareRegister; }
+ JumpType type() const { return data.realTypes.m_type; }
+ JumpLinkType linkType() const { return data.realTypes.m_linkType; }
+ BranchType branchType() const { return data.realTypes.m_branchType; }
+ void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
+ Condition condition() const { return data.realTypes.m_condition; }
+ bool is64Bit() const { return data.realTypes.m_is64Bit; }
+ bool isThunk() const { return data.realTypes.m_isThunk == ThunkOrNot::Thunk; }
+ unsigned bitNumber() const { return data.realTypes.m_bitNumber; }
+ RegisterID compareRegister() const { return data.realTypes.m_compareRegister; }

private:
union {
@@ -3694,16 +3694,16 @@ class ARM64Assembler {

// Assembler admin methods:

- static ALWAYS_INLINE int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
+ static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }

- static ALWAYS_INLINE bool canCompact(JumpType jumpType)
+ static bool canCompact(JumpType jumpType)
{
// Fixed jumps cannot be compacted
// Keep in mind that nearCall and tailCall are encoded as JumpNoCondition.
return (jumpType == JumpNoCondition) || (jumpType == JumpCondition) || (jumpType == JumpCompareAndBranch) || (jumpType == JumpTestBit);
}

- static ALWAYS_INLINE JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+ static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
{
auto computeJumpType = [&](const uint8_t* from, const uint8_t* to) -> JumpLinkType {
auto jumpType = record.type();
@@ -3833,7 +3833,7 @@

protected:
template<Datasize size>
- static ALWAYS_INLINE bool checkMovk(int insn, int _hw, RegisterID _rd)
+ static bool checkMovk(int insn, int _hw, RegisterID _rd)
{
Datasize sf;
MoveWideOp opc;
@@ -3849,7 +3849,7 @@
&& rd == _rd;
}

- static ALWAYS_INLINE void linkPointer(int* address, void* valuePtr, bool flush = false)
+ static void linkPointer(int* address, void* valuePtr, bool flush = false)
{
Datasize sf;
MoveWideOp opc;
@@ -3868,7 +3868,7 @@
}

template<BranchType type, CopyFunction copy = performJITMemcpy>
- static ALWAYS_INLINE void linkJumpOrCall(int* from, const int* fromInstruction, void* to)
+ static void linkJumpOrCall(int* from, const int* fromInstruction, void* to)
{
static_assert(type == BranchType_JMP || type == BranchType_CALL);

@@ -3903,7 +3903,7 @@
}

template<BranchTargetType type, CopyFunction copy = performJITMemcpy>
- static ALWAYS_INLINE void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, const int* fromInstruction, void* to)
+ static void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, const int* fromInstruction, void* to)
{
RELEASE_ASSERT(roundUpToMultipleOf<instructionSize>(from) == from);
ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
@@ -3929,7 +3929,7 @@
}

template<BranchTargetType type, CopyFunction copy = performJITMemcpy>
- static ALWAYS_INLINE void linkConditionalBranch(Condition condition, int* from, const int* fromInstruction, void* to)
+ static void linkConditionalBranch(Condition condition, int* from, const int* fromInstruction, void* to)
{
RELEASE_ASSERT(roundUpToMultipleOf<instructionSize>(from) == from);
ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
@@ -3955,7 +3955,7 @@
}

template<BranchTargetType type, CopyFunction copy = performJITMemcpy>
- static ALWAYS_INLINE void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, const int* fromInstruction, void* to)
+ static void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, const int* fromInstruction, void* to)
{
RELEASE_ASSERT(roundUpToMultipleOf<instructionSize>(from) == from);
ASSERT(!(reinterpret_cast<intptr_t>(from) & 3));
@@ -3982,7 +3982,7 @@
}

template<BranchType type>
- static ALWAYS_INLINE void relinkJumpOrCall(int* from, const int* fromInstruction, void* to)
+ static void relinkJumpOrCall(int* from, const int* fromInstruction, void* to)
{
static_assert(type == BranchType_JMP || type == BranchType_CALL);
if ((type == BranchType_JMP) && disassembleNop(from)) {
@@ -4031,16 +4031,16 @@
linkJumpOrCall<type>(from, fromInstruction, to);
}

- static ALWAYS_INLINE int* addressOf(void* code, AssemblerLabel label)
+ static int* addressOf(void* code, AssemblerLabel label)
{
return reinterpret_cast<int*>(static_cast<char*>(code) + label.offset());
}

- static ALWAYS_INLINE RegisterID disassembleXOrSp(int reg) { return reg == 31 ? ARM64Registers::sp : static_cast<RegisterID>(reg); }
- static ALWAYS_INLINE RegisterID disassembleXOrZr(int reg) { return reg == 31 ? ARM64Registers::zr : static_cast<RegisterID>(reg); }
- static ALWAYS_INLINE RegisterID disassembleXOrZrOrSp(bool useZr, int reg) { return reg == 31 ? (useZr ? ARM64Registers::zr : ARM64Registers::sp) : static_cast<RegisterID>(reg); }
+ static RegisterID disassembleXOrSp(int reg) { return reg == 31 ? ARM64Registers::sp : static_cast<RegisterID>(reg); }
+ static RegisterID disassembleXOrZr(int reg) { return reg == 31 ? ARM64Registers::zr : static_cast<RegisterID>(reg); }
+ static RegisterID disassembleXOrZrOrSp(bool useZr, int reg) { return reg == 31 ? (useZr ? ARM64Registers::zr : ARM64Registers::sp) : static_cast<RegisterID>(reg); }

- static ALWAYS_INLINE bool disassembleAddSubtractImmediate(void* address, Datasize& sf, AddOp& op, SetFlags& S, int& shift, int& imm12, RegisterID& rn, RegisterID& rd)
+ static bool disassembleAddSubtractImmediate(void* address, Datasize& sf, AddOp& op, SetFlags& S, int& shift, int& imm12, RegisterID& rn, RegisterID& rd)
{
int insn = *static_cast<int*>(address);
sf = static_cast<Datasize>((insn >> 31) & 1);
@@ -4053,7 +4053,7 @@
return (insn & 0x1f000000) == 0x11000000;
}

- static ALWAYS_INLINE bool disassembleLoadStoreRegisterUnsignedImmediate(void* address, MemOpSize& size, bool& V, MemOp& opc, int& imm12, RegisterID& rn, RegisterID& rt)
+ static bool disassembleLoadStoreRegisterUnsignedImmediate(void* address, MemOpSize& size, bool& V, MemOp& opc, int& imm12, RegisterID& rn, RegisterID& rt)
{
int insn = *static_cast<int*>(address);
size = static_cast<MemOpSize>((insn >> 30) & 3);
@@ -4065,7 +4065,7 @@
return (insn & 0x3b000000) == 0x39000000;
}

- static ALWAYS_INLINE bool disassembleMoveWideImediate(void* address, Datasize& sf, MoveWideOp& opc, int& hw, uint16_t& imm16, RegisterID& rd)
+ static bool disassembleMoveWideImediate(void* address, Datasize& sf, MoveWideOp& opc, int& hw, uint16_t& imm16, RegisterID& rd)
{
int insn = *static_cast<int*>(address);
sf = static_cast<Datasize>((insn >> 31) & 1);
@@ -4076,13 +4076,13 @@
return (insn & 0x1f800000) == 0x12800000;
}

- static ALWAYS_INLINE bool disassembleNop(void* address)
+ static bool disassembleNop(void* address)
{
unsigned insn = *static_cast<unsigned*>(address);
return insn == 0xd503201f;
}

- static ALWAYS_INLINE bool disassembleCompareAndBranchImmediate(void* address, Datasize& sf, bool& op, int& imm19, RegisterID& rt)
+ static bool disassembleCompareAndBranchImmediate(void* address, Datasize& sf, bool& op, int& imm19, RegisterID& rt)
{
int insn = *static_cast<int*>(address);
sf = static_cast<Datasize>((insn >> 31) & 1);
@@ -4093,7 +4093,7 @@

}

- static ALWAYS_INLINE bool disassembleConditionalBranchImmediate(void* address, unsigned& op01, int& imm19, Condition &condition)
+ static bool disassembleConditionalBranchImmediate(void* address, unsigned& op01, int& imm19, Condition &condition)
{
int insn = *static_cast<int*>(address);
op01 = ((insn >> 23) & 0x2) | ((insn >> 4) & 0x1);
@@ -4102,7 +4102,7 @@
return (insn & 0xfe000000) == 0x54000000;
}

- static ALWAYS_INLINE bool disassembleTestAndBranchImmediate(void* address, bool& op, unsigned& bitNumber, int& imm14, RegisterID& rt)
+ static bool disassembleTestAndBranchImmediate(void* address, bool& op, unsigned& bitNumber, int& imm14, RegisterID& rt)
{
int insn = *static_cast<int*>(address);
op = (insn >> 24) & 0x1;
@@ -4113,7 +4113,7 @@

}

- static ALWAYS_INLINE bool disassembleUnconditionalBranchImmediate(void* address, bool& op, int& imm26)
+ static bool disassembleUnconditionalBranchImmediate(void* address, bool& op, int& imm26)
{
int insn = *static_cast<int*>(address);
op = (insn >> 31) & 1;
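
The disassemble* helpers above share one shape: load the 32-bit instruction word, shift out the named fields, and check the fixed opcode bits. As a standalone worked example (not WebKit code) using the same shifts as disassembleMoveWideImediate, decoding MOVZ x0, #0x1234, which assembles to 0xd2824680:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        uint32_t insn = 0xd2824680; // MOVZ x0, #0x1234

        uint32_t sf    = (insn >> 31) & 1;      // 1: 64-bit operation
        uint32_t opc   = (insn >> 29) & 3;      // 2: MOVZ
        uint32_t hw    = (insn >> 21) & 3;      // 0: imm16 shifted by 0
        uint32_t imm16 = (insn >> 5) & 0xffff;  // 0x1234
        uint32_t rd    = insn & 0x1f;           // 0: x0

        // The class check used above: move-wide fixes bits 28-23 to 100101.
        assert((insn & 0x1f800000) == 0x12800000);
        assert(sf == 1 && opc == 2 && hw == 0 && imm16 == 0x1234 && rd == 0);
        return 0;
    }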
Source/JavaScriptCore/assembler/AssemblerBuffer.h (10 additions, 10 deletions)
@@ -319,7 +319,7 @@ namespace JSC {
threadSpecificData->takeBufferIfLarger(WTFMove(m_storage));
}

- ALWAYS_INLINE bool isAvailable(unsigned space)
+ bool isAvailable(unsigned space)
{
return m_index + space <= m_storage.capacity();
}
@@ -330,7 +330,7 @@
outOfLineGrow();
}

- ALWAYS_INLINE bool isAligned(int alignment) const
+ bool isAligned(int alignment) const
{
return !(m_index & (alignment - 1));
}
@@ -343,10 +343,10 @@
void putInt64Unchecked(int64_t value) { putIntegralUnchecked(value); }
void putInt64(int64_t value) { putIntegral(value); }
#endif
- ALWAYS_INLINE void putIntUnchecked(int32_t value) { putIntegralUnchecked(value); }
- ALWAYS_INLINE void putInt(int32_t value) { putIntegral(value); }
+ void putIntUnchecked(int32_t value) { putIntegralUnchecked(value); }
+ void putInt(int32_t value) { putIntegral(value); }

- ALWAYS_INLINE size_t codeSize() const
+ size_t codeSize() const
{
return m_index;
}
@@ -362,14 +362,14 @@
}
#endif

- ALWAYS_INLINE AssemblerLabel label() const
+ AssemblerLabel label() const
{
return AssemblerLabel(m_index);
}

unsigned debugOffset() { return m_index; }

- ALWAYS_INLINE AssemblerData&& releaseAssemblerData()
+ AssemblerData&& releaseAssemblerData()
{
return WTFMove(m_storage);
}
@@ -438,12 +438,12 @@
#endif

#if CPU(ARM64E)
- ALWAYS_INLINE ARM64EHash<ShouldSign::Yes>& arm64eHash() { return m_hash; }
+ ARM64EHash<ShouldSign::Yes>& arm64eHash() { return m_hash; }
#endif

protected:
template<typename IntegralType>
- ALWAYS_INLINE void putIntegral(IntegralType value)
+ void putIntegral(IntegralType value)
{
unsigned nextIndex = m_index + sizeof(IntegralType);
if (UNLIKELY(nextIndex > m_storage.capacity()))
@@ -452,7 +452,7 @@
}

template<typename IntegralType>
- ALWAYS_INLINE void putIntegralUnchecked(IntegralType value)
+ void putIntegralUnchecked(IntegralType value)
{
#if CPU(ARM64)
static_assert(sizeof(value) == 4);
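
The buffer methods above pair a checked write (putIntegral grows the storage when the value would not fit) with an unchecked one (putIntegralUnchecked assumes the caller already reserved space, e.g. via ensureSpace). A toy sketch of that pattern, with hypothetical names rather than WebKit's types:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    class ToyBuffer {
    public:
        bool isAvailable(size_t space) const { return m_index + space <= m_storage.size(); }

        // Checked write: grow first if needed, then defer to the unchecked path.
        template<typename T>
        void put(T value)
        {
            if (!isAvailable(sizeof(T)))
                m_storage.resize(std::max(2 * m_storage.size(), m_index + sizeof(T)));
            putUnchecked(value);
        }

        // Unchecked write: the caller guarantees capacity, so no branch here.
        template<typename T>
        void putUnchecked(T value)
        {
            std::memcpy(m_storage.data() + m_index, &value, sizeof(T));
            m_index += sizeof(T);
        }

        size_t codeSize() const { return m_index; }

    private:
        std::vector<uint8_t> m_storage = std::vector<uint8_t>(64);
        size_t m_index = 0;
    };

Keeping the hot unchecked path tiny is exactly why functions like these are tempting targets for ALWAYS_INLINE, and also why adding or removing the hint is measurable.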
Source/JavaScriptCore/assembler/MacroAssemblerARM64.h (5 additions, 5 deletions)
@@ -72,7 +72,7 @@ class MacroAssemblerARM64 : public AbstractMacroAssembler<Assembler> {
static constexpr ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -((Assembler::NUMBER_OF_ADDRESS_ENCODING_INSTRUCTIONS + 1) * INSTRUCTION_SIZE);

public:
- ALWAYS_INLINE MacroAssemblerARM64()
+ MacroAssemblerARM64()
: m_dataMemoryTempRegister(this, dataTempRegister)
, m_cachedMemoryTempRegister(this, memoryTempRegister)
, m_makeJumpPatchable(false)
@@ -88,14 +88,14 @@ class MacroAssemblerARM64 : public AbstractMacroAssembler<Assembler> {
static constexpr Assembler::JumpType DefaultJump = Assembler::JumpNoConditionFixedSize;

Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
- ALWAYS_INLINE static bool canCompact(JumpType jumpType) { return Assembler::canCompact(jumpType); }
- ALWAYS_INLINE static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return Assembler::computeJumpType(record, from, to); }
- ALWAYS_INLINE static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
+ static bool canCompact(JumpType jumpType) { return Assembler::canCompact(jumpType); }
+ static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return Assembler::computeJumpType(record, from, to); }
+ static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return Assembler::jumpSizeDelta(jumpType, jumpLinkType); }

template <Assembler::CopyFunction copy>
ALWAYS_INLINE static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return Assembler::link<copy>(record, from, fromInstruction, to); }

- ALWAYS_INLINE static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
{
// This is the largest 32-bit access allowed, aligned to 64-bit boundary.
return !(value & ~0x3ff8);
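
All of the touched functions sit on the branch-compaction path: in copyCompactAndLinkCode the assembler rewrites fixed-size jump placeholders into the shortest ARM64 branch encoding that still reaches the target, with canCompact/computeJumpType/jumpSizeDelta driving the choice. A toy sketch of the range test involved (illustrative only; pickBranchSize is a hypothetical name, not WebKit's API):

    #include <cstdint>

    enum class BranchSize { Short, Long };

    // ARM64 B.cond carries a signed 19-bit word offset, so it reaches
    // about +/-1 MB, and the byte offset must be 4-byte aligned.
    static bool conditionalBranchInRange(intptr_t byteOffset)
    {
        return byteOffset >= -(1 << 20) && byteOffset < (1 << 20) && !(byteOffset & 3);
    }

    // Prefer the single 4-byte conditional branch; otherwise fall back to a
    // longer sequence built around an unconditional B, whose signed 26-bit
    // word offset reaches about +/-128 MB.
    static BranchSize pickBranchSize(const uint8_t* from, const uint8_t* to)
    {
        return conditionalBranchInRange(to - from) ? BranchSize::Short : BranchSize::Long;
    }

In the real code, computeJumpType records the chosen encoding on each LinkRecord and jumpSizeDelta reports how many bytes the shorter form saves, so the copy can slide the remaining code down.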
