[JSC][32bit] Fix build after unlinked baseline JIT
https://bugs.webkit.org/show_bug.cgi?id=230803

Patch by Xan López <xan@igalia.com> on 2021-10-01
Reviewed by Saam Barati.

This is enough to make things build, but pretty much nothing works
yet. That will be fixed in a follow-up; this way we at least give
people a chance to avoid introducing more build regressions.

(With some code by Mikhail R. Gadelha and Geza Lore)

* CMakeLists.txt:
* assembler/MacroAssemblerARMv7.h:
(JSC::MacroAssemblerARMv7::branch32):
(JSC::MacroAssemblerARMv7::branchAdd32):
* assembler/MacroAssemblerMIPS.h:
(JSC::MacroAssemblerMIPS::branchAdd32):
* bytecode/CallLinkInfo.h:
(JSC::CallLinkInfo::addressOfMaxArgumentCountIncludingThis):
* jit/JIT.h:
* jit/JITCall32_64.cpp:
(JSC::JIT::compileOpCall):
* jit/JITInlines.h:
(JSC::JIT::emitValueProfilingSite):
(JSC::JIT::emitValueProfilingSiteIfProfiledOpcode):
(JSC::JIT::emitArrayProfilingSiteWithCell):
* jit/JITOpcodes.cpp:
(JSC::JIT::emit_op_loop_hint):
* jit/JITPropertyAccess.cpp:
(JSC::JIT::emitVarInjectionCheck):
* jit/JITPropertyAccess32_64.cpp:
(JSC::JIT::emitHasPrivateSlow):
(JSC::JIT::emitSlow_op_has_private_name):
(JSC::JIT::emitSlow_op_has_private_brand):
(JSC::JIT::emitResolveClosure):
(JSC::JIT::emit_op_resolve_scope):
(JSC::JIT::emit_op_get_from_scope):
(JSC::JIT::emitPutGlobalVariableIndirect):
(JSC::JIT::emit_op_put_to_scope):
(JSC::JIT::emitSlow_op_put_to_scope):
(JSC::JIT::emitVarInjectionCheck): Deleted.
* llint/LowLevelInterpreter.asm:
* llint/LowLevelInterpreter32_64.asm:

Canonical link: https://commits.webkit.org/242393@main
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@283389 268f45cc-cd09-0410-ab3c-d52691b4dbfc
commit-queue@webkit.org committed Oct 1, 2021
1 parent 0c05b9f commit 284220971737e8456c6b1f78eabdd116852ec255
Showing 13 changed files with 236 additions and 61 deletions.
@@ -839,6 +839,7 @@ set(JavaScriptCore_PRIVATE_FRAMEWORK_HEADERS
jit/AssemblyHelpers.h
jit/AssemblyHelpersSpoolers.h
jit/BaselineJITCode.h
jit/CallFrameShuffleData.h
jit/CCallHelpers.h
jit/ExecutableAllocator.h
jit/ExecutableMemoryHandle.h
@@ -859,6 +860,7 @@ set(JavaScriptCore_PRIVATE_FRAMEWORK_HEADERS
jit/Reg.h
jit/RegisterAtOffset.h
jit/RegisterAtOffsetList.h
jit/RegisterMap.h
jit/RegisterSet.h
jit/Snippet.h
jit/SnippetParams.h
@@ -1,3 +1,49 @@
2021-10-01 Xan López <xan@igalia.com>

[JSC][32bit] Fix build after unlinked baseline JIT
https://bugs.webkit.org/show_bug.cgi?id=230803

Reviewed by Saam Barati.

This is enough to make things build, but pretty much nothing works
yet. That will be fixed in a follow-up; this way we at least give
people a chance to avoid introducing more build regressions.

(With some code by Mikhail R. Gadelha and Geza Lore)

* CMakeLists.txt:
* assembler/MacroAssemblerARMv7.h:
(JSC::MacroAssemblerARMv7::branch32):
(JSC::MacroAssemblerARMv7::branchAdd32):
* assembler/MacroAssemblerMIPS.h:
(JSC::MacroAssemblerMIPS::branchAdd32):
* bytecode/CallLinkInfo.h:
(JSC::CallLinkInfo::addressOfMaxArgumentCountIncludingThis):
* jit/JIT.h:
* jit/JITCall32_64.cpp:
(JSC::JIT::compileOpCall):
* jit/JITInlines.h:
(JSC::JIT::emitValueProfilingSite):
(JSC::JIT::emitValueProfilingSiteIfProfiledOpcode):
(JSC::JIT::emitArrayProfilingSiteWithCell):
* jit/JITOpcodes.cpp:
(JSC::JIT::emit_op_loop_hint):
* jit/JITPropertyAccess.cpp:
(JSC::JIT::emitVarInjectionCheck):
* jit/JITPropertyAccess32_64.cpp:
(JSC::JIT::emitHasPrivateSlow):
(JSC::JIT::emitSlow_op_has_private_name):
(JSC::JIT::emitSlow_op_has_private_brand):
(JSC::JIT::emitResolveClosure):
(JSC::JIT::emit_op_resolve_scope):
(JSC::JIT::emit_op_get_from_scope):
(JSC::JIT::emitPutGlobalVariableIndirect):
(JSC::JIT::emit_op_put_to_scope):
(JSC::JIT::emitSlow_op_put_to_scope):
(JSC::JIT::emitVarInjectionCheck): Deleted.
* llint/LowLevelInterpreter.asm:
* llint/LowLevelInterpreter32_64.asm:

2021-10-01 Yusuke Suzuki <ysuzuki@apple.com>

[JSC] Remove CodeBlock::m_numberOfNonArgumentValueProfiles since we can get the same value from UnlinkedCodeBlock
@@ -1610,7 +1610,14 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler<Assembler> {

Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
{
m_assembler.cmp(left, right);
if (left == ARMRegisters::sp) {
move(left, dataTempRegister);
m_assembler.cmp(dataTempRegister, right);
} else if (right == ARMRegisters::sp) {
move(right, dataTempRegister);
m_assembler.cmp(left, dataTempRegister);
} else
m_assembler.cmp(left, right);
return Jump(makeBranch(cond));
}
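
The underlying Thumb-2 cmp sequence apparently cannot encode SP directly, which is why the rewritten branch32 copies it through dataTempRegister first. A minimal usage sketch, with a hypothetical helper and stack-limit register that are not part of the patch:

// Hypothetical helper (not from the patch): the kind of stack check that now
// works when one operand is the stack pointer; branch32 moves SP into
// dataTempRegister before emitting the compare.
#include "MacroAssemblerARMv7.h" // assumed include for this sketch

static JSC::MacroAssemblerARMv7::Jump emitStackCheck(JSC::MacroAssemblerARMv7& jit,
    JSC::MacroAssemblerARMv7::RegisterID stackLimitGPR)
{
    return jit.branch32(JSC::MacroAssemblerARMv7::Below, JSC::ARMRegisters::sp, stackLimitGPR);
}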

@@ -1840,27 +1847,13 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler<Assembler> {

Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
{
// Move the high bits of the address into addressTempRegister,
// and load the value into dataTempRegister.
move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));

// Do the add.
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
if (armImm.isValid())
m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
else {
// If the operand does not fit into an immediate then load it temporarily
// into addressTempRegister; since we're overwriting addressTempRegister
// we'll need to reload it with the high bits of the address afterwards.
move(imm, addressTempRegister);
m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
}

// Store the result.
m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
add32(imm, dest);
return Jump(makeBranch(cond));
}

Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, Address dest)
{
add32(imm, dest);
return Jump(makeBranch(cond));
}
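
Both overloads now delegate the read-modify-write to add32(imm, dest) and branch on the flags the flag-setting add leaves behind, instead of open-coding the load/add/store. A usage sketch with a hypothetical helper and counter (not part of the patch):

// Hypothetical helper (not from the patch): the pattern this overload serves,
// e.g. bumping a counter at a fixed address and taking a slow path when the
// signed add overflows.
#include "MacroAssemblerARMv7.h" // assumed include for this sketch
#include <cstdint>

static JSC::MacroAssemblerARMv7::Jump emitBumpCounter(JSC::MacroAssemblerARMv7& jit, int32_t* counter)
{
    return jit.branchAdd32(JSC::MacroAssemblerARMv7::Overflow,
        JSC::MacroAssemblerARMv7::TrustedImm32(1),
        JSC::MacroAssemblerARMv7::AbsoluteAddress(counter));
}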

@@ -2354,6 +2354,95 @@ class MacroAssemblerMIPS : public AbstractMacroAssembler<Assembler> {
return Jump();
}

Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, Address dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
if (cond == Overflow) {
if (m_fixedWidth) {
/*
load dest, dataTemp
move imm, immTemp
xor cmpTemp, dataTemp, immTemp
addu dataTemp, dataTemp, immTemp
store dataTemp, dest
bltz cmpTemp, No_overflow # diff sign bit -> no overflow
xor cmpTemp, dataTemp, immTemp
bgez cmpTemp, No_overflow # same sign bit -> no overflow
nop
b Overflow
nop
b No_overflow
nop
nop
nop
No_overflow:
*/
load32(dest, dataTempRegister);
move(imm, immTempRegister);
m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
store32(dataTempRegister, dest);
m_assembler.bltz(cmpTempRegister, 9);
m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
m_assembler.bgez(cmpTempRegister, 7);
m_assembler.nop();
} else {
m_assembler.lw(dataTempRegister, dest.base, dest.offset);
if (imm.m_value >= 0 && imm.m_value <= 32767) {
move(dataTempRegister, cmpTempRegister);
m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
m_assembler.bltz(cmpTempRegister, 9);
m_assembler.sw(dataTempRegister, dest.base, dest.offset);
m_assembler.bgez(dataTempRegister, 7);
m_assembler.nop();
} else if (imm.m_value >= -32768 && imm.m_value < 0) {
move(dataTempRegister, cmpTempRegister);
m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
m_assembler.bgez(cmpTempRegister, 9);
m_assembler.sw(dataTempRegister, dest.base, dest.offset);
m_assembler.bltz(cmpTempRegister, 7);
m_assembler.nop();
} else {
move(imm, immTempRegister);
m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
m_assembler.bltz(cmpTempRegister, 10);
m_assembler.sw(dataTempRegister, dest.base, dest.offset);
m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
m_assembler.bgez(cmpTempRegister, 7);
m_assembler.nop();
}
}
return jump();
}
if (m_fixedWidth) {
move(imm, immTempRegister);
load32(dest, dataTempRegister);
add32(immTempRegister, dataTempRegister);
store32(dataTempRegister, dest);
} else {
m_assembler.lw(dataTempRegister, dest.base, dest.offset);
add32(imm, dataTempRegister);
m_assembler.sw(dataTempRegister, dest.base, dest.offset);
}
if (cond == Signed) {
// Check if dest is negative.
m_assembler.slt(cmpTempRegister, dataTempRegister, MIPSRegisters::zero);
return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
}
if (cond == PositiveOrZero) {
// Check if dest is not negative.
m_assembler.slt(cmpTempRegister, dataTempRegister, MIPSRegisters::zero);
return branchEqual(cmpTempRegister, MIPSRegisters::zero);
}
if (cond == Zero)
return branchEqual(dataTempRegister, MIPSRegisters::zero);
if (cond == NonZero)
return branchNotEqual(dataTempRegister, MIPSRegisters::zero);
ASSERT(0);
return Jump();
}
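
Because MIPS has no flags register, the Overflow path above detects signed overflow with a sign-bit test: overflow can only happen when the two operands share a sign, and it shows up as the sum taking the opposite sign. A minimal sketch of that same test in plain C++ (not part of the patch):

#include <cstdint>

// The test the bltz/bgez pair above encodes: no overflow if the operands'
// signs differ (first xor is negative), and no overflow if the sum keeps
// the operands' sign (second xor is non-negative).
static bool addOverflows(int32_t a, int32_t b)
{
    uint32_t ua = static_cast<uint32_t>(a);
    uint32_t ub = static_cast<uint32_t>(b);
    uint32_t sum = ua + ub; // addu: wraps, never traps
    bool sameSign = ((ua ^ ub) >> 31) == 0;
    bool sumFlippedSign = ((sum ^ ub) >> 31) != 0;
    return sameSign && sumFlippedSign;
}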

Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
@@ -25,6 +25,7 @@

#pragma once

#include "CallFrameShuffleData.h"
#include "CallMode.h"
#include "CodeLocation.h"
#include "CodeSpecializationKind.h"
@@ -41,7 +42,6 @@ class CCallHelpers;
class FunctionCodeBlock;
class JSFunction;
enum OpcodeID : unsigned;
struct CallFrameShuffleData;

struct UnlinkedCallLinkInfo;

@@ -314,6 +314,13 @@ class CallLinkInfo : public PackedRawSentinelNode<CallLinkInfo> {
return OBJECT_OFFSETOF(CallLinkInfo, m_maxArgumentCountIncludingThis);
}

#if USE(JSVALUE32_64)
uint32_t* addressOfMaxArgumentCountIncludingThis()
{
return &m_maxArgumentCountIncludingThis;
}
#endif
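
The raw-pointer accessor is 32-bit only because generated code there can reach the counter through an absolute address rather than through the OBJECT_OFFSETOF-based accessor above. A hedged sketch with a hypothetical helper (not part of the patch; the real call sites may differ):

// Hypothetical helper (not from the patch): read the counter through its raw
// address instead of base register + offsetOfMaxArgumentCountIncludingThis().
#include "CCallHelpers.h"  // assumed includes for this sketch
#include "CallLinkInfo.h"

static void loadMaxArgumentCount(JSC::CCallHelpers& jit, JSC::CallLinkInfo& info, JSC::GPRReg destGPR)
{
    jit.load32(info.addressOfMaxArgumentCountIncludingThis(), destGPR);
}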

uint32_t maxArgumentCountIncludingThis()
{
return m_maxArgumentCountIncludingThis;
@@ -345,12 +345,13 @@ namespace JSC {

// This assumes that the value to profile is in regT0 and that regT3 is available for
// scratch.
#if USE(JSVALUE64)
template<typename Bytecode> void emitValueProfilingSite(const Bytecode&, GPRReg);
template<typename Bytecode> void emitValueProfilingSite(const Bytecode&, JSValueRegs);
#else
#if USE(JSVALUE32_64)
void emitValueProfilingSite(ValueProfile&, JSValueRegs);
template<typename Metadata> void emitValueProfilingSite(Metadata&, JSValueRegs);
template<typename Metadata>
std::enable_if_t<std::is_same<decltype(Metadata::m_profile), ValueProfile>::value, void>
emitValueProfilingSite(Metadata&, JSValueRegs);
#endif

void emitValueProfilingSiteIfProfiledOpcode(...);
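
A toy illustration of the SFINAE shape used above, with stand-in types rather than JSC's (not part of the patch): the overload only exists for metadata types whose m_profile member is exactly a ValueProfile, so every other metadata struct simply drops out of overload resolution instead of triggering a hard error.

#include <type_traits>

struct ValueProfile { };
struct GetByValMetadata { ValueProfile m_profile; }; // eligible
struct JumpMetadata { int m_targetOffset; };         // silently excluded

template<typename Metadata>
std::enable_if_t<std::is_same<decltype(Metadata::m_profile), ValueProfile>::value, void>
profileValue(Metadata& metadata)
{
    // ... record the value into metadata.m_profile ...
    (void)metadata;
}
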
@@ -363,6 +364,11 @@ namespace JSC {
template <typename Bytecode>
void emitArrayProfilingSiteWithCell(const Bytecode&, ptrdiff_t, RegisterID cellGPR, RegisterID scratchGPR);

#if USE(JSVALUE32_64)
void emitArrayProfilingSiteWithCell(RegisterID, ArrayProfile*, RegisterID);
void emitArrayProfilingSiteWithCell(RegisterID, RegisterID, RegisterID);
#endif

template<typename Op>
ECMAMode ecmaMode(Op);

@@ -310,7 +310,7 @@ void JIT::compileOpCall(const Instruction* instruction, unsigned callLinkInfoInd

checkStackPointerAlignment();
if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments) {
auto slowPaths = info->emitTailCallFastPath(*this, regT0, regT2, CallLinkInfo::UseDataIC::Yes, [&] {
auto slowPaths = info->emitTailCallDataICFastPath(*this, regT0, regT2, [&] {
emitRestoreCalleeSaves();
prepareForTailCallSlow(regT2);
});
@@ -334,6 +334,12 @@ inline void JIT::emitValueProfilingSite(ValueProfile& valueProfile, JSValueRegs
store32(value.payloadGPR(), &descriptor->asBits.payload);
store32(value.tagGPR(), &descriptor->asBits.tag);
}

template<typename Metadata>
std::enable_if_t<std::is_same<decltype(Metadata::m_profile), ValueProfile>::value, void> JIT::emitValueProfilingSite(Metadata& metadata, JSValueRegs value)
{
emitValueProfilingSite(valueProfileFor(metadata, m_bytecodeIndex.checkpoint()), value);
}
#endif

template<typename Op>
@@ -342,29 +348,33 @@ inline std::enable_if_t<std::is_same<decltype(Op::Metadata::m_profile), ValuePro
#if USE(JSVALUE64)
emitValueProfilingSite(bytecode, regT0);
#else
emitValueProfilingSite(bytecode.metadata(m_codeBlock), JSValueRegs(regT1, regT0));
emitValueProfilingSite(bytecode, JSValueRegs(regT1, regT0));
#endif
}

inline void JIT::emitValueProfilingSiteIfProfiledOpcode(...) { }

#if USE(JSVALUE64)
template<typename Bytecode>
inline void JIT::emitValueProfilingSite(const Bytecode& bytecode, JSValueRegs value)
{
if (!shouldEmitProfiling())
return;

#if USE(JSVALUE64)
ptrdiff_t offset = m_unlinkedCodeBlock->metadata().offsetInMetadataTable(bytecode) + valueProfileOffsetFor<Bytecode>(m_bytecodeIndex.checkpoint()) + ValueProfile::offsetOfFirstBucket();
store64(value.gpr(), Address(s_metadataGPR, offset));
#else
UNUSED_PARAM(value);
UNUSED_PARAM(bytecode);
// FIXME.
#endif
}

template<typename Bytecode>
inline void JIT::emitValueProfilingSite(const Bytecode& bytecode, GPRReg resultReg)
{
emitValueProfilingSite(bytecode, JSValueRegs(resultReg));
}
#endif

template <typename Bytecode>
inline void JIT::emitArrayProfilingSiteWithCell(const Bytecode& bytecode, ptrdiff_t offsetOfArrayProfile, RegisterID cellGPR, RegisterID scratchGPR)
@@ -381,6 +391,24 @@ inline void JIT::emitArrayProfilingSiteWithCell(const Bytecode& bytecode, Regist
emitArrayProfilingSiteWithCell(bytecode, Bytecode::Metadata::offsetOfArrayProfile() + ArrayProfile::offsetOfLastSeenStructureID(), cellGPR, scratchGPR);
}

#if USE(JSVALUE32_64)
inline void JIT::emitArrayProfilingSiteWithCell(RegisterID cellGPR, ArrayProfile* arrayProfile, RegisterID scratchGPR)
{
if (shouldEmitProfiling()) {
load32(MacroAssembler::Address(cellGPR, JSCell::structureIDOffset()), scratchGPR);
store32(scratchGPR, arrayProfile->addressOfLastSeenStructureID());
}
}

inline void JIT::emitArrayProfilingSiteWithCell(RegisterID cellGPR, RegisterID arrayProfileGPR, RegisterID scratchGPR)
{
if (shouldEmitProfiling()) {
load32(MacroAssembler::Address(cellGPR, JSCell::structureIDOffset()), scratchGPR);
store32(scratchGPR, Address(arrayProfileGPR, ArrayProfile::offsetOfLastSeenStructureID()));
}
}
#endif

ALWAYS_INLINE int32_t JIT::getOperandConstantInt(VirtualRegister src)
{
return getConstantOperand(src).asInt32();
@@ -1466,10 +1466,12 @@ void JIT::emit_op_loop_hint(const Instruction* instruction)

#if USE(JSVALUE64)
JSValueRegs resultRegs(GPRInfo::returnValueGPR);
loadGlobalObject(resultRegs.gpr());
#else
JSValueRegs resultRegs(GPRInfo::returnValueGPR2, GPRInfo::returnValueGPR);
loadGlobalObject(resultRegs.payloadGPR());
move(TrustedImm32(JSValue::CellTag), resultRegs.tagGPR());
#endif
loadGlobalObject(resultRegs.gpr());
checkStackPointerAlignment();
emitRestoreCalleeSaves();
emitFunctionEpilogue();
