From f2f3c91fa89c2bef7d30bba0324cdba55aa3ae4f Mon Sep 17 00:00:00 2001
From: Keith Miller
Date: Fri, 24 Mar 2023 05:45:16 -0700
Subject: [PATCH] DFG should support tuples

https://bugs.webkit.org/show_bug.cgi?id=253413

Reviewed by Yusuke Suzuki.

This change adds support for tuples in the DFG. It works similarly to how tuples work in B3, where a side buffer holds most of the metadata for a tuple. In the DFG there are three pieces of information held there:

1) The reference count
2) The result flags
3) The virtual register

1) tells us if the ExtractFromTuple node for a given tuple result still exists (e.g. it could have been constant folded or dead code eliminated). From that we decide whether to fill in 3), the virtual register, when allocating registers. If we didn't have the reference count we would have no way to know that the virtual register isn't going to be consumed, and we would leak it. Lastly we have 2), the result flags for the node. This tells the ExtractFromTuple, and theoretically any consumers of the ExtractFromTuple (right now the only ExtractFromTuple users produce Int32s), what value format the tuple's result will be in. For the DFG, we don't need this because we know there's at most one ExtractFromTuple for any given tuple index, since we don't duplicate code.

When dumping the DFG graph, ExtractFromTuple follows the same pattern as B3 and uses `<<` followed by the tuple offset it extracts, e.g.:

          EnumeratorNextUpdateIndexAndMode(Check:Untyped:D@97, Check:Untyped:D@98, Check:Untyped:D@100, Check:Untyped:D@99, VarArgs, SelectUsingPredictions+NonArray+InBounds+AsIs+Read, enumeratorModes = 4, R:World, W:Heap, Exits, ClobbersExit, bc#115, ExitValid)
 7  1  0: D@102:<1:->   ExtractFromTuple(Check:Untyped:D@101, Int32|UseAsOther, <<0, bc#115, ExitInvalid)
 8  1  0: D@103:        MovHint(Check:Untyped:D@102, MustGen, loc10, W:SideState, ClobbersExit, bc#115, ExitInvalid)
 9  1  0: D@104:<1:->   ExtractFromTuple(Check:Untyped:D@101, Int32|UseAsOther, <<1, bc#115, ExitInvalid)

This patch also adds support for calling operations that return a tuple in the FTL/B3 via CCall. CCall can take exactly the tuple `{ pointerType(), pointerType() }`, which, for every calling convention we support, should be returned in the two return value registers (returnValueGPR/returnValueGPR2). As the only way to look into a tuple is via the B3 Procedure, the first Air::Arg of any CCall/ColdCCall Inst is now the CCallSpecial for the compiling Air::Code. This gives us access to the Air::Code inside CCallCustom::forEachArg and isValidForm.
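For reference, the side-buffer entry added to DFGGraph.h (Graph::m_tupleData) carries exactly these three fields. The struct below is quoted from the hunk later in this patch; the comments mapping each field to 1)-3) above are editorial annotations, not part of the change:

    struct TupleData {
        uint16_t refCount { 0 };         // 1) non-zero while an ExtractFromTuple for this result is still live
        uint16_t resultFlags { 0 };      // 2) value format of this result (currently NodeResultInt32 for both
                                         //    results of EnumeratorNextUpdateIndexAndMode)
        VirtualRegister virtualRegister; // 3) spill location, only filled in during register allocation when
                                         //    the reference count says the result is actually consumed
    };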
* JSTests/stress/for-in-redefine-enumerable.js:
(shouldBe):
* Source/JavaScriptCore/b3/B3LowerToAir.cpp:
* Source/JavaScriptCore/b3/B3Type.h:
(JSC::B3::pointerType):
(JSC::B3::registerType):
* Source/JavaScriptCore/b3/B3Validate.cpp:
* Source/JavaScriptCore/b3/air/AirCCallingConvention.cpp:
(JSC::B3::Air::cCallResultCount):
(JSC::B3::Air::cCallArgumentRegisterWidth):
(JSC::B3::Air::cCallResult):
* Source/JavaScriptCore/b3/air/AirCCallingConvention.h:
* Source/JavaScriptCore/b3/air/AirCustom.cpp:
(JSC::B3::Air::CCallCustom::isValidForm):
* Source/JavaScriptCore/b3/air/AirCustom.h:
(JSC::B3::Air::CCallCustom::forEachArg):
* Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.cpp:
(JSC::B3::Air::lowerAfterRegAlloc):
* Source/JavaScriptCore/b3/air/AirLowerMacros.cpp:
(JSC::B3::Air::lowerMacros):
* Source/JavaScriptCore/b3/air/AirOpcode.opcodes:
* Source/JavaScriptCore/b3/testb3.h:
* Source/JavaScriptCore/b3/testb3_3.cpp:
(addCallTests):
* Source/JavaScriptCore/b3/testb3_5.cpp:
(JSC_DEFINE_JIT_OPERATION):
(testCallPairResult):
(testCallPairResultRare):
* Source/JavaScriptCore/dfg/DFGAbstractInterpreter.h:
(JSC::DFG::AbstractInterpreter::setTupleConstant):
* Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h:
(JSC::DFG::AbstractInterpreter::executeEffects):
* Source/JavaScriptCore/dfg/DFGAtTailAbstractState.cpp:
(JSC::DFG::AtTailAbstractState::AtTailAbstractState):
* Source/JavaScriptCore/dfg/DFGAtTailAbstractState.h:
(JSC::DFG::AtTailAbstractState::forTupleNode):
(JSC::DFG::AtTailAbstractState::clearForTupleNode):
(JSC::DFG::AtTailAbstractState::setForTupleNode):
(JSC::DFG::AtTailAbstractState::setTypeForTupleNode):
(JSC::DFG::AtTailAbstractState::setNonCellTypeForTupleNode):
(JSC::DFG::AtTailAbstractState::makeBytecodeTopForTupleNode):
(JSC::DFG::AtTailAbstractState::makeHeapTopForTupleNode):
* Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp:
(JSC::DFG::ByteCodeParser::addToGraph):
(JSC::DFG::ByteCodeParser::parseBlock):
* Source/JavaScriptCore/dfg/DFGClobberize.h:
(JSC::DFG::clobberize):
* Source/JavaScriptCore/dfg/DFGDoesGC.cpp:
(JSC::DFG::doesGC):
* Source/JavaScriptCore/dfg/DFGFixupPhase.cpp:
(JSC::DFG::FixupPhase::fixupNode):
* Source/JavaScriptCore/dfg/DFGGenerationInfo.h:
(JSC::DFG::GenerationInfo::initFromTupleResult):
* Source/JavaScriptCore/dfg/DFGGraph.cpp:
(JSC::DFG::Graph::dump):
* Source/JavaScriptCore/dfg/DFGGraph.h:
* Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.cpp:
(JSC::DFG::InPlaceAbstractState::InPlaceAbstractState):
* Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.h:
(JSC::DFG::InPlaceAbstractState::forTupleNode):
(JSC::DFG::InPlaceAbstractState::clearForTupleNode):
(JSC::DFG::InPlaceAbstractState::setForTupleNode):
(JSC::DFG::InPlaceAbstractState::setTypeForTupleNode):
(JSC::DFG::InPlaceAbstractState::setNonCellTypeForTupleNode):
(JSC::DFG::InPlaceAbstractState::makeBytecodeTopForTupleNode):
(JSC::DFG::InPlaceAbstractState::makeHeapTopForTupleNode):
* Source/JavaScriptCore/dfg/DFGMayExit.cpp:
* Source/JavaScriptCore/dfg/DFGNode.h:
(JSC::DFG::Node::isTuple const):
(JSC::DFG::Node::setTupleOffset):
(JSC::DFG::Node::tupleOffset const):
(JSC::DFG::Node::hasExtractOffset const):
(JSC::DFG::Node::extractOffset const):
(JSC::DFG::Node::tupleIndex const):
(JSC::DFG::Node::tupleSize const):
(JSC::DFG::Node::hasVirtualRegister):
(JSC::DFG::Node::virtualRegister):
(JSC::DFG::Node::setVirtualRegister):
* Source/JavaScriptCore/dfg/DFGNodeType.h:
* Source/JavaScriptCore/dfg/DFGOperations.cpp:
(JSC::DFG::JSC_DEFINE_JIT_OPERATION):
* Source/JavaScriptCore/dfg/DFGOperations.h:
(JSC::DFG::makeUGPRPair):
* Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp:
* Source/JavaScriptCore/dfg/DFGSafeToExecute.h:
(JSC::DFG::safeToExecute):
* Source/JavaScriptCore/dfg/DFGScoreBoard.h:
(JSC::DFG::ScoreBoard::use):
* Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp:
* Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h:
(JSC::DFG::SpeculativeJIT::strictInt32TupleResultWithoutUsingChildren):
* Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp:
(JSC::DFG::SpeculativeJIT::compile):
* Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::compile):
* Source/JavaScriptCore/dfg/DFGValidate.cpp:
* Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp:
(JSC::DFG::VirtualRegisterAllocationPhase::run):
* Source/JavaScriptCore/ftl/FTLCapabilities.cpp:
(JSC::FTL::canCompile):
* Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp:
(JSC::FTL::DFG::LowerDFGToB3::LowerDFGToB3):
(JSC::FTL::DFG::LowerDFGToB3::compileNode):
(JSC::FTL::DFG::LowerDFGToB3::compileCompareStrictEq):
* Source/JavaScriptCore/ftl/FTLOutput.cpp:
(JSC::FTL::Output::extract):
* Source/JavaScriptCore/ftl/FTLOutput.h:
* Source/JavaScriptCore/wasm/WasmAirIRGeneratorBase.h:
(JSC::Wasm::AirIRGeneratorBase::emitCCall):

Canonical link: https://commits.webkit.org/262068@main
---
 JSTests/stress/for-in-redefine-enumerable.js  |   5 +-
 Source/JavaScriptCore/b3/B3LowerToAir.cpp     |  12 +-
 Source/JavaScriptCore/b3/B3Type.h             |   9 +-
 Source/JavaScriptCore/b3/B3Validate.cpp       |   8 +
 .../b3/air/AirCCallingConvention.cpp          |  29 ++--
 .../b3/air/AirCCallingConvention.h            |   6 +-
 Source/JavaScriptCore/b3/air/AirCustom.cpp    |  27 +++-
 Source/JavaScriptCore/b3/air/AirCustom.h      |  15 +-
 .../b3/air/AirLowerAfterRegAlloc.cpp          |  32 ++--
 .../JavaScriptCore/b3/air/AirLowerMacros.cpp  |  32 ++--
 .../JavaScriptCore/b3/air/AirOpcode.opcodes   |   2 +-
 Source/JavaScriptCore/b3/testb3.h             |   2 +
 Source/JavaScriptCore/b3/testb3_3.cpp         |   5 +-
 Source/JavaScriptCore/b3/testb3_5.cpp         |  69 ++++++++
 .../dfg/DFGAbstractInterpreter.h              |   8 +
 .../dfg/DFGAbstractInterpreterInlines.h       |  37 ++---
 .../dfg/DFGAtTailAbstractState.cpp            |   2 +
 .../dfg/DFGAtTailAbstractState.h              |  78 +++++++++
 .../JavaScriptCore/dfg/DFGByteCodeParser.cpp  |  14 +-
 Source/JavaScriptCore/dfg/DFGClobberize.h     |   5 +-
 Source/JavaScriptCore/dfg/DFGDoesGC.cpp       |   3 +-
 Source/JavaScriptCore/dfg/DFGFixupPhase.cpp   |   8 +-
 Source/JavaScriptCore/dfg/DFGGenerationInfo.h |   8 +
 Source/JavaScriptCore/dfg/DFGGraph.cpp        |  14 +-
 Source/JavaScriptCore/dfg/DFGGraph.h          |  13 +-
 .../dfg/DFGInPlaceAbstractState.cpp           |   1 +
 .../dfg/DFGInPlaceAbstractState.h             |  96 +++++++++++
 Source/JavaScriptCore/dfg/DFGMayExit.cpp      |   3 +-
 Source/JavaScriptCore/dfg/DFGNode.h           |  54 ++++++-
 Source/JavaScriptCore/dfg/DFGNodeType.h       |   5 +-
 Source/JavaScriptCore/dfg/DFGOperations.cpp   |  10 +-
 Source/JavaScriptCore/dfg/DFGOperations.h     |  16 +-
 .../dfg/DFGPredictionPropagationPhase.cpp     |  42 ++++-
 Source/JavaScriptCore/dfg/DFGSafeToExecute.h  |   4 +-
 Source/JavaScriptCore/dfg/DFGScoreBoard.h     |   1 +
 .../JavaScriptCore/dfg/DFGSpeculativeJIT.cpp  | 150 +++++++++---------
 Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h |  31 +++-
 .../dfg/DFGSpeculativeJIT32_64.cpp            |  15 +-
 .../dfg/DFGSpeculativeJIT64.cpp               |  15 +-
 Source/JavaScriptCore/dfg/DFGValidate.cpp     |  32 +++-
 .../dfg/DFGVirtualRegisterAllocationPhase.cpp |  18 +++
 Source/JavaScriptCore/ftl/FTLCapabilities.cpp |   3 +-
 Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp |  77 ++++++---
 Source/JavaScriptCore/ftl/FTLOutput.cpp       |   5 +
 Source/JavaScriptCore/ftl/FTLOutput.h         |   1 +
.../wasm/WasmAirIRGeneratorBase.h | 3 +- 46 files changed, 776 insertions(+), 249 deletions(-) diff --git a/JSTests/stress/for-in-redefine-enumerable.js b/JSTests/stress/for-in-redefine-enumerable.js index d3a896a6f66a..56988ba09437 100644 --- a/JSTests/stress/for-in-redefine-enumerable.js +++ b/JSTests/stress/for-in-redefine-enumerable.js @@ -5,7 +5,7 @@ function assert(x) { function shouldBe(actual, expected) { if (actual !== expected) - throw new Error(`Bad value: ${actual}.`); + throw new Error(`Bad value: ${actual} expected ${expected}.`); } const enumDesc = { value: 0, writable: true, enumerable: true, configurable: true }; @@ -15,13 +15,16 @@ const dontEnumDesc = { value: 0, writable: true, enumerable: false, configurable (() => { function test() { var arr = Object.defineProperties([0, 0, 0], { 1: dontEnumDesc }); + var count = 0; for (var i in arr) { + count++; assert(i in arr); shouldBe(arr[i], 0); ++arr[i]; if (i === "0") Object.defineProperties(arr, { 1: enumDesc, 2: dontEnumDesc }); } + shouldBe(count, 1); shouldBe(arr[0], 1); shouldBe(arr[1], 0); shouldBe(arr[2], 0); diff --git a/Source/JavaScriptCore/b3/B3LowerToAir.cpp b/Source/JavaScriptCore/b3/B3LowerToAir.cpp index b13a2a55fdde..8302c75201d6 100644 --- a/Source/JavaScriptCore/b3/B3LowerToAir.cpp +++ b/Source/JavaScriptCore/b3/B3LowerToAir.cpp @@ -29,6 +29,7 @@ #if ENABLE(B3_JIT) #include "AirBlockInsertionSet.h" +#include "AirCCallSpecial.h" #include "AirCode.h" #include "AirHelpers.h" #include "AirInsertionSet.h" @@ -144,6 +145,7 @@ class LowerToAir { } case Get: case Patchpoint: + case B3::CCall: case BottomTuple: { if (value->type().isTuple()) ensureTupleTmps(value, m_tupleValueToTmps); @@ -446,6 +448,7 @@ class LowerToAir { switch (tupleValue->opcode()) { case Phi: case Patchpoint: + case B3::CCall: case BottomTuple: { return m_tupleValueToTmps.find(tupleValue)->value; } @@ -4457,7 +4460,7 @@ class LowerToAir { case B3::CCall: { CCallValue* cCall = m_value->as(); - Inst inst(m_isRare ? Air::ColdCCall : Air::CCall, cCall); + Inst inst(m_isRare ? Air::ColdCCall : Air::CCall, cCall, Arg::special(m_code.cCallSpecial())); // We have a ton of flexibility regarding the callee argument, but currently, we don't // use it yet. 
It gets weird for reasons: @@ -4470,8 +4473,11 @@ class LowerToAir { // FIXME: https://bugs.webkit.org/show_bug.cgi?id=151052 inst.args.append(tmp(cCall->child(0))); - if (cCall->type() != Void) - inst.args.append(tmp(cCall)); + if (cCall->type() != Void) { + forEachImmOrTmp(cCall, [&] (Arg arg, Type, unsigned) { + inst.args.append(arg.tmp()); + }); + } for (unsigned i = 1; i < cCall->numChildren(); ++i) inst.args.append(immOrTmp(cCall->child(i))); diff --git a/Source/JavaScriptCore/b3/B3Type.h b/Source/JavaScriptCore/b3/B3Type.h index 48835cdbcffc..d9ba4306fa31 100644 --- a/Source/JavaScriptCore/b3/B3Type.h +++ b/Source/JavaScriptCore/b3/B3Type.h @@ -135,13 +135,20 @@ inline bool Type::isVector() const return kind() == V128; } -inline Type pointerType() +constexpr Type pointerType() { if (is32Bit()) return Int32; return Int64; } +constexpr Type registerType() +{ + if (isRegister64Bit()) + return Int64; + return Int32; +} + inline size_t sizeofType(Type type) { switch (type.kind()) { diff --git a/Source/JavaScriptCore/b3/B3Validate.cpp b/Source/JavaScriptCore/b3/B3Validate.cpp index fc97ce050517..eefc83a69804 100644 --- a/Source/JavaScriptCore/b3/B3Validate.cpp +++ b/Source/JavaScriptCore/b3/B3Validate.cpp @@ -674,6 +674,14 @@ class Validater { VALIDATE(!value->kind().hasExtraBits(), ("At ", *value)); VALIDATE(value->numChildren() >= 1, ("At ", *value)); VALIDATE(value->child(0)->type() == pointerType(), ("At ", *value)); + if (value->type().isTuple()) { + // FIXME: Right now we only support a pair of register sized values since on every calling + // convention we support that's returned in returnValueGPR/returnValueGPR2, respectively. + VALIDATE(m_procedure.resultCount(value->type()) == 2, ("At ", *value)); + VALIDATE(m_procedure.typeAtOffset(value->type(), 0) == registerType(), ("At ", *value)); + VALIDATE(m_procedure.typeAtOffset(value->type(), 1) == registerType(), ("At ", *value)); + } + break; case Patchpoint: VALIDATE(!value->kind().hasExtraBits(), ("At ", *value)); diff --git a/Source/JavaScriptCore/b3/air/AirCCallingConvention.cpp b/Source/JavaScriptCore/b3/air/AirCCallingConvention.cpp index fa49091a03fc..c6fafaa42346 100644 --- a/Source/JavaScriptCore/b3/air/AirCCallingConvention.cpp +++ b/Source/JavaScriptCore/b3/air/AirCCallingConvention.cpp @@ -56,7 +56,7 @@ void marshallCCallArgumentImpl(Vector& result, unsigned& argumentCount, uns // In the rare case when the Arg width does not match the argument width // (32-bit arm passing a 64-bit argument), we respect the width needed // for each stack access: - slotSize = bytesForWidth(cCallArgumentRegisterWidth(child)); + slotSize = bytesForWidth(cCallArgumentRegisterWidth(child->type())); // but the logical stack slot uses the natural alignment of the argument slotAlignment = sizeofType(child->type()); @@ -103,7 +103,7 @@ Vector computeCCallingConvention(Code& code, CCallValue* value) return result; } -size_t cCallResultCount(CCallValue* value) +size_t cCallResultCount(Code& code, CCallValue* value) { switch (value->type().kind()) { case Void: @@ -113,7 +113,12 @@ size_t cCallResultCount(CCallValue* value) return 2; return 1; case Tuple: - RELEASE_ASSERT_NOT_REACHED(); + // We only support tuples that return exactly two register sized ints. 
+ UNUSED_PARAM(code); + ASSERT(code.proc().resultCount(value->type()) == 2); + ASSERT(code.proc().typeAtOffset(value->type(), 0) == pointerType()); + ASSERT(code.proc().typeAtOffset(value->type(), 1) == pointerType()); + return 2; default: return 1; @@ -136,19 +141,19 @@ size_t cCallArgumentRegisterCount(const Value* value) } } -Width cCallArgumentRegisterWidth(const Value* value) +Width cCallArgumentRegisterWidth(Type type) { if constexpr (is32Bit()) { - if (value->type() == Int64) + if (type == Int64) return Width32; } - return widthForType(value->type()); + return widthForType(type); } -Tmp cCallResult(CCallValue* value, unsigned index) +Tmp cCallResult(Code& code, CCallValue* value, unsigned index) { - ASSERT_UNUSED(index, index <= (is64Bit() ? 1 : 2)); + ASSERT(index < 2); switch (value->type().kind()) { case Void: return Tmp(); @@ -160,9 +165,15 @@ Tmp cCallResult(CCallValue* value, unsigned index) return Tmp(GPRInfo::returnValueGPR); case Float: case Double: + ASSERT(!index); return Tmp(FPRInfo::returnValueFPR); - case V128: case Tuple: + ASSERT_UNUSED(code, code.proc().resultCount(value->type()) == 2); + // We only support functions that return each parameter in its own register for now. + ASSERT(code.proc().typeAtOffset(value->type(), 0) == registerType()); + ASSERT(code.proc().typeAtOffset(value->type(), 1) == registerType()); + return index ? Tmp(GPRInfo::returnValueGPR2) : Tmp(GPRInfo::returnValueGPR); + case V128: break; } diff --git a/Source/JavaScriptCore/b3/air/AirCCallingConvention.h b/Source/JavaScriptCore/b3/air/AirCCallingConvention.h index 205ed116e99b..a7aae2be2ddb 100644 --- a/Source/JavaScriptCore/b3/air/AirCCallingConvention.h +++ b/Source/JavaScriptCore/b3/air/AirCCallingConvention.h @@ -42,7 +42,7 @@ class Code; Vector computeCCallingConvention(Code&, CCallValue*); -size_t cCallResultCount(CCallValue*); +size_t cCallResultCount(Code&, CCallValue*); /* * On some platforms (well, on 32-bit platforms,) C functions can take arguments @@ -54,9 +54,9 @@ size_t cCallResultCount(CCallValue*); // Return the number of Air::Args needed to marshall this Value to the C function size_t cCallArgumentRegisterCount(const Value*); // Return the width of the individual Air::Args needed to marshall this value -Width cCallArgumentRegisterWidth(const Value*); +Width cCallArgumentRegisterWidth(Type); -Tmp cCallResult(CCallValue*, unsigned); +Tmp cCallResult(Code&, CCallValue*, unsigned); Inst buildCCall(Code&, Value* origin, const Vector&); diff --git a/Source/JavaScriptCore/b3/air/AirCustom.cpp b/Source/JavaScriptCore/b3/air/AirCustom.cpp index 5335f7098424..1849c40d6ec1 100644 --- a/Source/JavaScriptCore/b3/air/AirCustom.cpp +++ b/Source/JavaScriptCore/b3/air/AirCustom.cpp @@ -31,6 +31,7 @@ #include "AirCCallingConvention.h" #include "AirInstInlines.h" #include "B3CCallValue.h" +#include "B3ProcedureInlines.h" #include "B3ValueInlines.h" #include "CCallHelpers.h" @@ -65,8 +66,14 @@ bool CCallCustom::isValidForm(Inst& inst) if (!value) return false; - size_t resultCount = cCallResultCount(value); - size_t expectedArgCount = resultCount; + if (!inst.args[0].isSpecial()) + return false; + + Special* special = inst.args[0].special(); + Code& code = special->code(); + + size_t resultCount = cCallResultCount(code, value); + size_t expectedArgCount = resultCount + 1; // first Arg is always CCallSpecial. 
for (Value* child : value->children()) { ASSERT(child->type() != Tuple); expectedArgCount += cCallArgumentRegisterCount(child); @@ -76,22 +83,28 @@ bool CCallCustom::isValidForm(Inst& inst) return false; // The arguments can only refer to the stack, tmps, or immediates. - for (Arg& arg : inst.args) { + for (unsigned i = inst.args.size() - 1; i; --i) { + Arg arg = inst.args[i]; if (!arg.isTmp() && !arg.isStackMemory() && !arg.isSomeImm()) return false; } // Callee - if (!inst.args[0].isGP()) + if (!inst.args[1].isGP()) return false; - unsigned offset = 1; + unsigned offset = 2; // If there is a result then it cannot be an immediate. - for (size_t i = 0 ; i < resultCount; ++i) { + for (size_t i = 0; i < resultCount; ++i) { if (inst.args[offset].isSomeImm()) return false; - if (!inst.args[offset].canRepresent(value)) + + if (value->type().isTuple()) { + Type type = code.proc().typeAtOffset(value->type(), i); + if (!inst.args[offset].canRepresent(type)) + return false; + } else if (!inst.args[offset].canRepresent(value)) return false; offset++; } diff --git a/Source/JavaScriptCore/b3/air/AirCustom.h b/Source/JavaScriptCore/b3/air/AirCustom.h index a3e3a5a56ff4..e6d989995b3e 100644 --- a/Source/JavaScriptCore/b3/air/AirCustom.h +++ b/Source/JavaScriptCore/b3/air/AirCustom.h @@ -134,7 +134,10 @@ struct CCallCustom : public CommonCustomBase { { CCallValue* value = inst.origin->as(); - unsigned index = 0; + Code& code = inst.args[0].special()->code(); + + // Skip the CCallSpecial Arg. + unsigned index = 1; auto next = [&](Arg::Role role, Bank bank, Width width) { functor(inst.args[index++], role, bank, width); @@ -142,11 +145,13 @@ struct CCallCustom : public CommonCustomBase { next(Arg::Use, GP, pointerWidth()); // callee - for (size_t n = cCallResultCount(value); n; --n) { + size_t resultCount = cCallResultCount(code, value); + for (size_t n = 0; n < resultCount; ++n) { + Type type = value->type().isTuple() ? code.proc().typeAtOffset(value->type(), n) : value->type(); next( Arg::Def, - bankForType(value->type()), - cCallArgumentRegisterWidth(value) + bankForType(type), + cCallArgumentRegisterWidth(type) ); } @@ -156,7 +161,7 @@ struct CCallCustom : public CommonCustomBase { next( Arg::Use, bankForType(child->type()), - cCallArgumentRegisterWidth(child) + cCallArgumentRegisterWidth(child->type()) ); } } diff --git a/Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.cpp b/Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.cpp index 45b8c405cfef..31a4268b4876 100644 --- a/Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.cpp +++ b/Source/JavaScriptCore/b3/air/AirLowerAfterRegAlloc.cpp @@ -36,6 +36,7 @@ #include "AirRegLiveness.h" #include "AirPhaseScope.h" #include "B3CCallValue.h" +#include "B3ProcedureInlines.h" #include "B3ValueInlines.h" #include "RegisterSet.h" #include @@ -193,13 +194,17 @@ void lowerAfterRegAlloc(Code& code) ScalarRegisterSet preUsed = liveRegs.buildScalarRegisterSet(); ScalarRegisterSet postUsed = preUsed; Vector destinations = computeCCallingConvention(code, value); - Tmp result = cCallResult(value, 0); - Arg originalResult = result ? inst.args[1] : Arg(); + Vector results; + Vector originalResults; + for (unsigned i = 0; i < cCallResultCount(code, value); ++i) { + results.append(cCallResult(code, value, i)); + originalResults.append(inst.args[i + 2]); + } Vector pairs; for (unsigned i = 0; i < destinations.size(); ++i) { Value* child = value->child(i); - Arg src = inst.args[result ? (i >= 1 ? i + 1 : i) : i ]; + Arg src = inst.args[i >= 1 ? 
i + results.size() + 1 : i + 1]; Arg dst = destinations[i]; Width width = widthForType(child->type()); pairs.append(ShufflePair(src, dst, width)); @@ -217,8 +222,10 @@ void lowerAfterRegAlloc(Code& code) // Also need to save all live registers. Don't need to worry about the result // register. - if (originalResult.isReg()) - regsToSave.remove(originalResult.reg()); + for (Arg originalResult : originalResults) { + if (originalResult.isReg()) + regsToSave.remove(originalResult.reg()); + } Vector stackSlots; regsToSave.forEachWithWidth( [&] (Reg reg, Width width) { @@ -251,15 +258,16 @@ void lowerAfterRegAlloc(Code& code) ASSERT(stackSlot->byteSize() >= bytesForWidth(width)); pairs.append(ShufflePair(Arg::stack(stackSlot), arg, width)); }); - if (result) { - ShufflePair pair(result, originalResult, widthForType(value->type())); + for (unsigned i = 0; i < results.size(); ++i) { + Type type = value->type().isTuple() ? code.proc().typeAtOffset(value->type(), i) : value->type(); + ShufflePair pair(results[i], originalResults[i], widthForType(type)); pairs.append(pair); - } - // For finding scratch registers, we need to account for the possibility that - // the result is dead. - if (originalResult.isReg()) - postUsed.add(originalResult.reg(), IgnoreVectors); + // For finding scratch registers, we need to account for the possibility that + // the result is dead. + if (originalResults[i].isReg()) + postUsed.add(originalResults[i].reg(), IgnoreVectors); + } gpScratch = getScratches(postUsed, GP); fpScratch = getScratches(postUsed, FP); diff --git a/Source/JavaScriptCore/b3/air/AirLowerMacros.cpp b/Source/JavaScriptCore/b3/air/AirLowerMacros.cpp index 4e70dc140914..11b496c17b84 100644 --- a/Source/JavaScriptCore/b3/air/AirLowerMacros.cpp +++ b/Source/JavaScriptCore/b3/air/AirLowerMacros.cpp @@ -34,6 +34,7 @@ #include "AirInsertionSet.h" #include "AirPhaseScope.h" #include "B3CCallValue.h" +#include "B3ProcedureInlines.h" #include "B3ValueInlines.h" namespace JSC { namespace B3 { namespace Air { @@ -53,14 +54,14 @@ void lowerMacros(Code& code) Vector destinations = computeCCallingConvention(code, value); - unsigned resultCount = cCallResultCount(value); - ASSERT_IMPLIES(is64Bit(), resultCount <= 1); + unsigned resultCount = cCallResultCount(code, value); Vector shufflePairs; bool hasRegisterSource = false; unsigned offset = 1; auto addNextPair = [&](Width width) { - ShufflePair pair(inst.args[offset + resultCount], destinations[offset], width); + // Skip the special Arg in CCall + ShufflePair pair(inst.args[offset + resultCount + 1], destinations[offset], width); shufflePairs.append(pair); hasRegisterSource |= pair.src().isReg(); ++offset; @@ -68,7 +69,7 @@ void lowerMacros(Code& code) for (unsigned i = 1; i < value->numChildren(); ++i) { Value* child = value->child(i); for (unsigned j = 0; j < cCallArgumentRegisterCount(child); j++) - addNextPair(cCallArgumentRegisterWidth(child)); + addNextPair(cCallArgumentRegisterWidth(child->type())); } ASSERT(offset = inst.args.size()); @@ -97,13 +98,11 @@ void lowerMacros(Code& code) } // Indicate that we're using our original callee argument. - destinations[0] = inst.args[0]; + destinations[0] = inst.args[1]; // Save where the original instruction put its result. - Arg resultDst0 = resultCount >= 1 ? inst.args[1] : Arg(); -#if USE(JSVALUE32_64) - Arg resultDst1 = resultCount >= 2 ? inst.args[2] : Arg(); -#endif + Arg resultDst0 = resultCount >= 1 ? inst.args[2] : Arg(); + Arg resultDst1 = resultCount >= 2 ? 
inst.args[3] : Arg(); inst = buildCCall(code, inst.origin, destinations); if (oldKind.effects) @@ -111,26 +110,29 @@ void lowerMacros(Code& code) switch (value->type().kind()) { case Void: + break; case Tuple: + insertionSet.insert(instIndex + 1, Move, value, cCallResult(code, value, 0), resultDst0); + insertionSet.insert(instIndex + 1, Move, value, cCallResult(code, value, 1), resultDst1); break; case Float: - insertionSet.insert(instIndex + 1, MoveFloat, value, cCallResult(value, 0), resultDst0); + insertionSet.insert(instIndex + 1, MoveFloat, value, cCallResult(code, value, 0), resultDst0); break; case Double: - insertionSet.insert(instIndex + 1, MoveDouble, value, cCallResult(value, 0), resultDst0); + insertionSet.insert(instIndex + 1, MoveDouble, value, cCallResult(code, value, 0), resultDst0); break; case Int32: - insertionSet.insert(instIndex + 1, Move32, value, cCallResult(value, 0), resultDst0); + insertionSet.insert(instIndex + 1, Move32, value, cCallResult(code, value, 0), resultDst0); break; case Int64: - insertionSet.insert(instIndex + 1, Move, value, cCallResult(value, 0), resultDst0); + insertionSet.insert(instIndex + 1, Move, value, cCallResult(code, value, 0), resultDst0); #if USE(JSVALUE32_64) - insertionSet.insert(instIndex + 1, Move, value, cCallResult(value, 1), resultDst1); + insertionSet.insert(instIndex + 1, Move, value, cCallResult(code, value, 1), resultDst1); #endif break; case V128: ASSERT(is64Bit()); - insertionSet.insert(instIndex + 1, MoveVector, value, cCallResult(value, 0), resultDst0); + insertionSet.insert(instIndex + 1, MoveVector, value, cCallResult(code, value, 0), resultDst0); break; } }; diff --git a/Source/JavaScriptCore/b3/air/AirOpcode.opcodes b/Source/JavaScriptCore/b3/air/AirOpcode.opcodes index 97b686e92a3f..b3f44c7b16a3 100644 --- a/Source/JavaScriptCore/b3/air/AirOpcode.opcodes +++ b/Source/JavaScriptCore/b3/air/AirOpcode.opcodes @@ -2034,7 +2034,7 @@ custom Shuffle custom Patch # Instructions used for lowering C calls. These don't make it to Air generation. They get lowered to -# something else first. The origin Value must be a CCallValue. +# CCallSpecials first. The origin Value must be a CCallValue. 
custom CCall custom ColdCCall diff --git a/Source/JavaScriptCore/b3/testb3.h b/Source/JavaScriptCore/b3/testb3.h index 739da5ecfa3b..08a56c0ee5c9 100644 --- a/Source/JavaScriptCore/b3/testb3.h +++ b/Source/JavaScriptCore/b3/testb3.h @@ -959,6 +959,8 @@ void testCallSimplePure(int, int); void testCallFunctionWithHellaArguments(); void testCallFunctionWithHellaArguments2(); void testCallFunctionWithHellaArguments3(); +void testCallPairResult(int, int); +void testCallPairResultRare(int, int); void testReturnDouble(double value); void testReturnFloat(float value); void testMulNegArgArg(int, int); diff --git a/Source/JavaScriptCore/b3/testb3_3.cpp b/Source/JavaScriptCore/b3/testb3_3.cpp index 4076140aa824..d467d003c8e1 100644 --- a/Source/JavaScriptCore/b3/testb3_3.cpp +++ b/Source/JavaScriptCore/b3/testb3_3.cpp @@ -3999,7 +3999,10 @@ void addCallTests(const char* filter, Deque>>& tasks) RUN(testCallFunctionWithHellaArguments()); RUN(testCallFunctionWithHellaArguments2()); RUN(testCallFunctionWithHellaArguments3()); - + + RUN(testCallPairResult(1, 100)); + RUN(testCallPairResultRare(1, 100)); + RUN(testReturnDouble(0.0)); RUN(testReturnDouble(negativeZero())); RUN(testReturnDouble(42.5)); diff --git a/Source/JavaScriptCore/b3/testb3_5.cpp b/Source/JavaScriptCore/b3/testb3_5.cpp index 62f97e0895b3..1abb969f2fc1 100644 --- a/Source/JavaScriptCore/b3/testb3_5.cpp +++ b/Source/JavaScriptCore/b3/testb3_5.cpp @@ -2124,6 +2124,75 @@ void testCallFunctionWithHellaArguments3() CHECK(invoke(*compilation) == 7967500); } +struct IntPtrPair { + intptr_t a; + intptr_t b; +}; +extern "C" { +static JSC_DECLARE_JIT_OPERATION_WITHOUT_WTF_INTERNAL(simplePairFunction, IntPtrPair, (int, int)); +} +JSC_DEFINE_JIT_OPERATION(simplePairFunction, IntPtrPair, (int a, int b)) +{ + return { a - b, a * b }; +} + +void testCallPairResult(int a, int b) +{ + Procedure proc; + BasicBlock* root = proc.addBlock(); + + auto tupleType = proc.addTuple({ registerType(), registerType() }); + + CCallValue* call = root->appendNew(proc, tupleType, Origin(), + root->appendNew(proc, Origin(), tagCFunction(simplePairFunction)), + root->appendNew(proc, Origin(), a), + root->appendNew(proc, Origin(), b)); + + Value* sum = root->appendNew(proc, Sub, Origin(), + root->appendNew(proc, Origin(), registerType(), call, 0), + root->appendNew(proc, Origin(), registerType(), call, 1)); + root->appendNewControlValue(proc, Return, Origin(), sum); + + CHECK(isIdentical(compileAndRun(proc), (a - b) - (a * b))); +} + +void testCallPairResultRare(int a, int b) +{ + Procedure proc; + BasicBlock* root = proc.addBlock(); + BasicBlock* call = proc.addBlock(); + BasicBlock* ret0 = proc.addBlock(); + + + { + root->appendNewControlValue( + proc, Branch, Origin(), + root->appendNew(proc, Origin(), GPRInfo::argumentGPR0), + FrequentedBlock(call, FrequencyClass::Rare), + FrequentedBlock(ret0)); + } + + { + auto tupleType = proc.addTuple({ registerType(), registerType() }); + CCallValue* cCall = call->appendNew(proc, tupleType, Origin(), + call->appendNew(proc, Origin(), tagCFunction(simplePairFunction)), + call->appendNew(proc, Origin(), a), + call->appendNew(proc, Origin(), b)); + + Value* sum = call->appendNew(proc, Sub, Origin(), + call->appendNew(proc, Origin(), registerType(), cCall, 0), + call->appendNew(proc, Origin(), registerType(), cCall, 1)); + call->appendNewControlValue(proc, Return, Origin(), sum); + } + + { + ret0->appendNewControlValue(proc, Return, Origin(), + ret0->appendNew(proc, Origin(), 0)); + } + + CHECK(isIdentical(compileAndRun(proc, 1), (a - 
b) - (a * b))); +} + void testReturnDouble(double value) { Procedure proc; diff --git a/Source/JavaScriptCore/dfg/DFGAbstractInterpreter.h b/Source/JavaScriptCore/dfg/DFGAbstractInterpreter.h index fc81c732af0c..ad36c8324a64 100644 --- a/Source/JavaScriptCore/dfg/DFGAbstractInterpreter.h +++ b/Source/JavaScriptCore/dfg/DFGAbstractInterpreter.h @@ -248,6 +248,14 @@ class AbstractInterpreter { m_state.setShouldTryConstantFolding(true); } + void setTupleConstant(Node* node, unsigned index, FrozenValue value) + { + AbstractValue& abstractValue = m_state.forTupleNode(node, index); + abstractValue.set(m_graph, value, m_state.structureClobberState()); + abstractValue.fixTypeForRepresentation(m_graph, node); + m_state.setShouldTryConstantFolding(true); + } + ALWAYS_INLINE void filterByType(Edge& edge, SpeculatedType type); void verifyEdge(Node*, Edge); diff --git a/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h b/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h index d8fcf9d67a7a..a3e550775616 100644 --- a/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h +++ b/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h @@ -389,6 +389,13 @@ bool AbstractInterpreter::executeEffects(unsigned clobberLimi m_state.setShouldTryConstantFolding(true); break; } + + case ExtractFromTuple: { + setForNode(node, m_state.forTupleNode(node->child1(), node->extractOffset())); + if (forNode(node).value()) + m_state.setShouldTryConstantFolding(true); + break; + } case ExtractCatchLocal: case ExtractOSREntryLocal: { @@ -4524,11 +4531,13 @@ bool AbstractInterpreter::executeEffects(unsigned clobberLimi case EnumeratorNextUpdateIndexAndMode: { ArrayMode arrayMode = node->arrayMode(); - if (node->enumeratorMetadata() == JSPropertyNameEnumerator::OwnStructureMode && m_graph.varArgChild(node, 0).useKind() == CellUse) { - // Do nothing. 
- } else if (node->enumeratorMetadata() != JSPropertyNameEnumerator::IndexedMode) + if (node->enumeratorMetadata() == JSPropertyNameEnumerator::OwnStructureMode && m_graph.varArgChild(node, 0).useKind() == CellUse) + setTupleConstant(node, 1, jsNumber(static_cast(JSPropertyNameEnumerator::OwnStructureMode))); + else if (node->enumeratorMetadata() != JSPropertyNameEnumerator::IndexedMode) { + m_state.setNonCellTypeForTupleNode(node, 1, SpecInt32Only); clobberWorld(); - else { + } else { + setTupleConstant(node, 1, jsNumber(static_cast(JSPropertyNameEnumerator::IndexedMode))); switch (arrayMode.type()) { case Array::Int32: case Array::Double: @@ -4544,25 +4553,7 @@ bool AbstractInterpreter::executeEffects(unsigned clobberLimi } } } - setNonCellTypeForNode(node, SpecBytecodeNumber); - break; - } - - case EnumeratorNextExtractMode: { - if (node->child1()->enumeratorMetadata() == JSPropertyNameEnumerator::IndexedMode) { - setConstant(node, jsNumber(static_cast(JSPropertyNameEnumerator::IndexedMode))); - break; - } - - if (node->child1()->enumeratorMetadata() == JSPropertyNameEnumerator::OwnStructureMode && m_graph.varArgChild(node->child1().node(), 0).useKind() == CellUse) { - setConstant(node, jsNumber(static_cast(JSPropertyNameEnumerator::OwnStructureMode))); - break; - } - - FALLTHROUGH; - } - case EnumeratorNextExtractIndex: { - setNonCellTypeForNode(node, SpecInt32Only); + m_state.setNonCellTypeForTupleNode(node, 0, SpecInt32Only); break; } diff --git a/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.cpp index 650ccf54b7cd..52afb1fb8189 100644 --- a/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.cpp +++ b/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.cpp @@ -36,12 +36,14 @@ namespace JSC { namespace DFG { AtTailAbstractState::AtTailAbstractState(Graph& graph) : m_graph(graph) , m_valuesAtTailMap(m_graph) + , m_tupleAbstractValues(m_graph) { for (BasicBlock* block : graph.blocksInNaturalOrder()) { auto& valuesAtTail = m_valuesAtTailMap.at(block); valuesAtTail.clear(); for (auto& valueAtTailPair : block->ssa->valuesAtTail) valuesAtTail.add(valueAtTailPair.node, valueAtTailPair.value); + m_tupleAbstractValues.at(block).grow(m_graph.m_tupleData.size()); } } diff --git a/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.h b/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.h index 438e16de8595..288e6cec91d0 100644 --- a/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.h +++ b/Source/JavaScriptCore/dfg/DFGAtTailAbstractState.h @@ -136,6 +136,83 @@ class AtTailAbstractState { { makeHeapTopForNode(edge.node()); } + + ALWAYS_INLINE AbstractValue& forTupleNode(NodeFlowProjection node, unsigned index) + { + ASSERT(index < node->tupleSize()); + return m_tupleAbstractValues.at(m_block).at(node->tupleOffset() + index); + } + + ALWAYS_INLINE AbstractValue& forTupleNode(Edge edge, unsigned index) + { + return forTupleNode(edge.node(), index); + } + + ALWAYS_INLINE void clearForTupleNode(NodeFlowProjection node, unsigned index) + { + forTupleNode(node, index).clear(); + } + + ALWAYS_INLINE void clearForTupleNode(Edge edge, unsigned index) + { + clearForTupleNode(edge.node(), index); + } + + template + ALWAYS_INLINE void setForTupleNode(NodeFlowProjection node, unsigned index, Arguments&&... arguments) + { + forTupleNode(node, index).set(m_graph, std::forward(arguments)...); + } + + template + ALWAYS_INLINE void setForTupleNode(Edge edge, unsigned index, Arguments&&... 
arguments) + { + setForTupleNode(edge.node(), index, std::forward(arguments)...); + } + + template + ALWAYS_INLINE void setTypeForTupleNode(NodeFlowProjection node, unsigned index, Arguments&&... arguments) + { + forTupleNode(node, index).setType(m_graph, std::forward(arguments)...); + } + + template + ALWAYS_INLINE void setTypeForTupleNode(Edge edge, unsigned index, Arguments&&... arguments) + { + setTypeForTupleNode(edge.node(), index, std::forward(arguments)...); + } + + template + ALWAYS_INLINE void setNonCellTypeForTupleNode(NodeFlowProjection node, unsigned index, Arguments&&... arguments) + { + forTupleNode(node, index).setNonCellType(std::forward(arguments)...); + } + + template + ALWAYS_INLINE void setNonCellTypeForTupleNode(Edge edge, unsigned index, Arguments&&... arguments) + { + setNonCellTypeForTupleNode(edge.node(), index, std::forward(arguments)...); + } + + ALWAYS_INLINE void makeBytecodeTopForTupleNode(NodeFlowProjection node, unsigned index) + { + forTupleNode(node, index).makeBytecodeTop(); + } + + ALWAYS_INLINE void makeBytecodeTopForTupleNode(Edge edge, unsigned index) + { + makeBytecodeTopForTupleNode(edge.node(), index); + } + + ALWAYS_INLINE void makeHeapTopForTupleNode(NodeFlowProjection node, unsigned index) + { + forTupleNode(node, index).makeHeapTop(); + } + + ALWAYS_INLINE void makeHeapTopForTupleNode(Edge edge, unsigned index) + { + makeHeapTopForTupleNode(edge.node(), index); + } unsigned size() const { return m_block->valuesAtTail.size(); } unsigned numberOfArguments() const { return m_block->valuesAtTail.numberOfArguments(); } @@ -181,6 +258,7 @@ class AtTailAbstractState { private: Graph& m_graph; BlockMap> m_valuesAtTailMap; + BlockMap> m_tupleAbstractValues; BasicBlock* m_block { nullptr }; bool m_trustEdgeProofs { false }; }; diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp index 4f49d7f4eaf4..fc1cd9e88efc 100644 --- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp +++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp @@ -817,6 +817,10 @@ class ByteCodeParser { m_hasAnyForceOSRExits |= (node->op() == ForceOSRExit); m_currentBlock->append(node); + if (node->isTuple()) { + node->setTupleOffset(m_graph.m_tupleData.size()); + m_graph.m_tupleData.grow(m_graph.m_tupleData.size() + node->tupleSize()); + } if (clobbersExitState(m_graph, node)) m_exitOK = false; return node; @@ -8931,12 +8935,14 @@ void ByteCodeParser::parseBlock(unsigned limit) addVarArgChild(nullptr); // storage for IndexedMode only. 
Node* updatedIndexAndMode = addToGraph(Node::VarArg, EnumeratorNextUpdateIndexAndMode, OpInfo(arrayMode.asWord()), OpInfo(seenModes)); - Node* updatedMode = addToGraph(EnumeratorNextExtractMode, updatedIndexAndMode); - set(bytecode.m_mode, updatedMode); - - Node* updatedIndex = addToGraph(EnumeratorNextExtractIndex, updatedIndexAndMode); + Node* updatedIndex = addToGraph(ExtractFromTuple, OpInfo(0), updatedIndexAndMode); + updatedIndex->setResult(NodeResultInt32); set(bytecode.m_index, updatedIndex); + Node* updatedMode = addToGraph(ExtractFromTuple, OpInfo(1), updatedIndexAndMode); + updatedMode->setResult(NodeResultInt32); + set(bytecode.m_mode, updatedMode); + set(bytecode.m_propertyName, addToGraph(EnumeratorNextUpdatePropertyName, OpInfo(), OpInfo(seenModes), updatedIndex, updatedMode, enumerator)); NEXT_OPCODE(op_enumerator_next); diff --git a/Source/JavaScriptCore/dfg/DFGClobberize.h b/Source/JavaScriptCore/dfg/DFGClobberize.h index 0309804dc2a0..3b5c676bff30 100644 --- a/Source/JavaScriptCore/dfg/DFGClobberize.h +++ b/Source/JavaScriptCore/dfg/DFGClobberize.h @@ -359,9 +359,8 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu return; } - case EnumeratorNextExtractMode: - case EnumeratorNextExtractIndex: { - def(PureValue(node)); + case ExtractFromTuple: { + def(PureValue(node, node->extractOffset())); return; } diff --git a/Source/JavaScriptCore/dfg/DFGDoesGC.cpp b/Source/JavaScriptCore/dfg/DFGDoesGC.cpp index 275cbc64bae1..a8cdeff8dee3 100644 --- a/Source/JavaScriptCore/dfg/DFGDoesGC.cpp +++ b/Source/JavaScriptCore/dfg/DFGDoesGC.cpp @@ -519,8 +519,7 @@ bool doesGC(Graph& graph, Node* node) case ResolveRope: return true; - case EnumeratorNextExtractMode: - case EnumeratorNextExtractIndex: + case ExtractFromTuple: return false; case PutByValDirect: diff --git a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp index fd18c9c8d156..aa65ca18df67 100644 --- a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp @@ -2418,11 +2418,15 @@ class FixupPhase : public Phase { fixEdge(index); fixEdge(m_graph.varArgChild(node, 2)); fixEdge(m_graph.varArgChild(node, 3)); + + m_graph.m_tupleData.at(node->tupleOffset()).resultFlags = NodeResultInt32; + m_graph.m_tupleData.at(node->tupleOffset() + 1).resultFlags = NodeResultInt32; break; } - case EnumeratorNextExtractIndex: - case EnumeratorNextExtractMode: { + case ExtractFromTuple: { + node->setResult(m_graph.m_tupleData.at(node->tupleIndex()).resultFlags); + ASSERT(node->hasResult()); break; } diff --git a/Source/JavaScriptCore/dfg/DFGGenerationInfo.h b/Source/JavaScriptCore/dfg/DFGGenerationInfo.h index 64655b463410..eb6d92b4eca2 100644 --- a/Source/JavaScriptCore/dfg/DFGGenerationInfo.h +++ b/Source/JavaScriptCore/dfg/DFGGenerationInfo.h @@ -147,6 +147,14 @@ class GenerationInfo { // Get the node that produced this value. 
Node* node() { return m_node; } + + void initFromTupleResult(Node* newNode) + { + ASSERT(m_useCount == 1); + ASSERT(newNode->child1().node() == m_node); + m_node = newNode; + m_useCount = m_node->refCount(); + } void noticeOSRBirth(VariableEventStreamBuilder& stream, Node* node, VirtualRegister virtualRegister) { diff --git a/Source/JavaScriptCore/dfg/DFGGraph.cpp b/Source/JavaScriptCore/dfg/DFGGraph.cpp index 8cf3fdda71cc..07aed18ca1b4 100644 --- a/Source/JavaScriptCore/dfg/DFGGraph.cpp +++ b/Source/JavaScriptCore/dfg/DFGGraph.cpp @@ -391,6 +391,8 @@ void Graph::dump(PrintStream& out, const char* prefixStr, Node* node, DumpContex out.print(comma, *node->putByStatus()); if (node->hasEnumeratorMetadata()) out.print(comma, "enumeratorModes = ", node->enumeratorMetadata().toRaw()); + if (node->hasExtractOffset()) + out.print(comma, "<<", node->extractOffset()); if (node->isJump()) out.print(comma, "T:", *node->targetBlock()); if (node->isBranch()) @@ -765,7 +767,9 @@ class RefCountCalculator { for (unsigned phiIndex = block->phis.size(); phiIndex--;) block->phis[phiIndex]->setRefCount(0); } - + for (auto& tupleData : m_graph.m_tupleData) + tupleData.refCount = 0; + // Now find the roots: // - Nodes that are must-generate. // - Nodes that are reachable from type checks. @@ -820,8 +824,7 @@ class RefCountCalculator { // will just not have gotten around to it. if (edge.isProved() || edge.willNotHaveCheck()) return; - if (!edge->postfixRef()) - m_worklist.append(edge.node()); + countNode(edge.node()); } void countNode(Node* node) @@ -831,11 +834,14 @@ class RefCountCalculator { m_worklist.append(node); } - void countEdge(Node*, Edge edge) + void countEdge(Node* node, Edge edge) { // Don't count edges that are already counted for their type checks. if (!(edge.isProved() || edge.willNotHaveCheck())) return; + // Tuples are special and have a reference count for each result. + if (node->op() == ExtractFromTuple) + m_graph.m_tupleData.at(node->tupleIndex()).refCount++; countNode(edge.node()); } diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h index 545912dfe0de..56bbccd9dd0b 100644 --- a/Source/JavaScriptCore/dfg/DFGGraph.h +++ b/Source/JavaScriptCore/dfg/DFGGraph.h @@ -221,13 +221,14 @@ class Graph final : public virtual Scannable { template Node* addNode(Params... params) { - return m_nodes.addNew(params...); + Node* node = m_nodes.addNew(params...); + return node; } template Node* addNode(SpeculatedType type, Params... params) { - Node* node = m_nodes.addNew(params...); + Node* node = addNode(params...); node->predict(type); return node; } @@ -1176,6 +1177,14 @@ class Graph final : public virtual Scannable { Vector m_roots; Vector m_varArgChildren; + struct TupleData { + uint16_t refCount { 0 }; + uint16_t resultFlags { 0 }; + VirtualRegister virtualRegister; + }; + + Vector m_tupleData; + // UnlinkedSimpleJumpTable/UnlinkedStringJumpTable are kept by UnlinkedCodeBlocks retained by baseline CodeBlocks handled by DFG / FTL. 
Vector m_unlinkedSwitchJumpTables; Vector m_switchJumpTables; diff --git a/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.cpp index ace0b1b6479e..462c4ef899ad 100644 --- a/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.cpp +++ b/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.cpp @@ -41,6 +41,7 @@ InPlaceAbstractState::InPlaceAbstractState(Graph& graph) : m_graph(graph) , m_abstractValues(*graph.m_abstractValuesCache) , m_variables(OperandsLike, graph.block(0)->variablesAtHead) + , m_tupleAbstractValues(graph.m_tupleData.size()) , m_block(nullptr) { } diff --git a/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.h b/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.h index c5d026873e01..9081fc4c05ff 100644 --- a/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.h +++ b/Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.h @@ -156,6 +156,101 @@ class InPlaceAbstractState { makeHeapTopForNode(edge.node()); } + ALWAYS_INLINE AbstractValue& forTupleNode(NodeFlowProjection node, unsigned index) + { + ASSERT(index < node->tupleSize()); + return fastForward(m_tupleAbstractValues.at(node->tupleOffset() + index)); + } + + ALWAYS_INLINE AbstractValue& forTupleNode(Edge edge, unsigned index) + { + return forTupleNode(edge.node(), index); + } + + ALWAYS_INLINE void clearForTupleNode(NodeFlowProjection node, unsigned index) + { + ASSERT(index < node->tupleSize()); + AbstractValue& value = m_tupleAbstractValues.at(node->tupleOffset() + index); + value.clear(); + value.m_effectEpoch = m_effectEpoch; + } + + ALWAYS_INLINE void clearForTupleNode(Edge edge, unsigned index) + { + clearForTupleNode(edge.node(), index); + } + + template + ALWAYS_INLINE void setForTupleNode(NodeFlowProjection node, unsigned index, Arguments&&... arguments) + { + ASSERT(index < node->tupleSize()); + AbstractValue& value = m_tupleAbstractValues.at(node->tupleOffset() + index); + value.set(m_graph, std::forward(arguments)...); + value.m_effectEpoch = m_effectEpoch; + } + + template + ALWAYS_INLINE void setForTupleNode(Edge edge, unsigned index, Arguments&&... arguments) + { + setForTupleNode(edge.node(), index, std::forward(arguments)...); + } + + template + ALWAYS_INLINE void setTypeForTupleNode(NodeFlowProjection node, unsigned index, Arguments&&... arguments) + { + ASSERT(index < node->tupleSize()); + AbstractValue& value = m_tupleAbstractValues.at(node->tupleOffset() + index); + value.setType(m_graph, std::forward(arguments)...); + value.m_effectEpoch = m_effectEpoch; + } + + template + ALWAYS_INLINE void setTypeForTupleNode(Edge edge, unsigned index, Arguments&&... arguments) + { + setTypeForTupleNode(edge.node(), index, std::forward(arguments)...); + } + + template + ALWAYS_INLINE void setNonCellTypeForTupleNode(NodeFlowProjection node, unsigned index, Arguments&&... arguments) + { + ASSERT(index < node->tupleSize()); + AbstractValue& value = m_tupleAbstractValues.at(node->tupleOffset() + index); + value.setNonCellType(std::forward(arguments)...); + value.m_effectEpoch = m_effectEpoch; + } + + template + ALWAYS_INLINE void setNonCellTypeForTupleNode(Edge edge, unsigned index, Arguments&&... 
arguments) + { + setNonCellTypeForTupleNode(edge.node(), index, std::forward(arguments)...); + } + + ALWAYS_INLINE void makeBytecodeTopForTupleNode(NodeFlowProjection node, unsigned index) + { + ASSERT(index < node->tupleSize()); + AbstractValue& value = m_tupleAbstractValues.at(node->tupleOffset() + index); + value.makeBytecodeTop(); + value.m_effectEpoch = m_effectEpoch; + } + + ALWAYS_INLINE void makeBytecodeTopForTupleNode(Edge edge, unsigned index) + { + makeBytecodeTopForTupleNode(edge.node(), index); + } + + ALWAYS_INLINE void makeHeapTopForTupleNode(NodeFlowProjection node, unsigned index) + { + ASSERT(index < node->tupleSize()); + AbstractValue& value = m_tupleAbstractValues.at(node->tupleOffset() + index); + value.makeHeapTop(); + value.m_effectEpoch = m_effectEpoch; + } + + ALWAYS_INLINE void makeHeapTopForTupleNode(Edge edge, unsigned index) + { + makeHeapTopForTupleNode(edge.node(), index); + } + Operands& variablesForDebugging(); unsigned size() const { return m_variables.size(); } @@ -280,6 +375,7 @@ class InPlaceAbstractState { FlowMap& m_abstractValues; Operands m_variables; + Vector m_tupleAbstractValues; FastBitVector m_activeVariables; BasicBlock* m_block; diff --git a/Source/JavaScriptCore/dfg/DFGMayExit.cpp b/Source/JavaScriptCore/dfg/DFGMayExit.cpp index c1651e677ecf..7db54a5c0ee3 100644 --- a/Source/JavaScriptCore/dfg/DFGMayExit.cpp +++ b/Source/JavaScriptCore/dfg/DFGMayExit.cpp @@ -111,8 +111,7 @@ ExitMode mayExitImpl(Graph& graph, Node* node, StateType& state) case FilterDeleteByStatus: case FilterCheckPrivateBrandStatus: case FilterSetPrivateBrandStatus: - case EnumeratorNextExtractMode: - case EnumeratorNextExtractIndex: + case ExtractFromTuple: break; case EnumeratorNextUpdatePropertyName: diff --git a/Source/JavaScriptCore/dfg/DFGNode.h b/Source/JavaScriptCore/dfg/DFGNode.h index 2f648a7954b6..9eacaed9fc2c 100644 --- a/Source/JavaScriptCore/dfg/DFGNode.h +++ b/Source/JavaScriptCore/dfg/DFGNode.h @@ -1558,6 +1558,52 @@ struct Node { return Edge(this, defaultUseKind()); } + bool isTuple() const + { + return op() == EnumeratorNextUpdateIndexAndMode; + } + + void setTupleOffset(unsigned tupleOffset) + { + m_virtualRegister = virtualRegisterForLocal(tupleOffset); + } + + // This is the start of the tuple in the graph's/phases' various tuple buffers. 
+ unsigned tupleOffset() const + { + ASSERT(isTuple()); + return m_virtualRegister.toLocal(); + } + + bool hasExtractOffset() const + { + return op() == ExtractFromTuple; + } + + unsigned extractOffset() const + { + ASSERT(hasExtractOffset()); + ASSERT(m_opInfo.as() < const_cast(this)->child1()->tupleSize()); + return m_opInfo.as(); + } + + unsigned tupleIndex() const + { + return const_cast(this)->child1()->tupleOffset() + extractOffset(); + } + + unsigned tupleSize() const + { + ASSERT(isTuple()); + switch (op()) { + case EnumeratorNextUpdateIndexAndMode: + return 2; + default: + break; + } + RELEASE_ASSERT_NOT_REACHED(); + } + bool isJump() { return op() == Jump; @@ -2513,20 +2559,20 @@ struct Node { bool hasVirtualRegister() { - return m_virtualRegister.isValid(); + return m_virtualRegister.isValid() && !isTuple(); } VirtualRegister virtualRegister() { ASSERT(hasResult()); - ASSERT(m_virtualRegister.isValid()); + ASSERT(hasVirtualRegister()); return m_virtualRegister; } void setVirtualRegister(VirtualRegister virtualRegister) { ASSERT(hasResult()); - ASSERT(!m_virtualRegister.isValid()); + ASSERT(!m_virtualRegister.isValid() && !isTuple()); m_virtualRegister = virtualRegister; } @@ -3384,7 +3430,7 @@ struct Node { unsigned m_index { std::numeric_limits::max() }; unsigned m_op : 10; // real type is NodeType unsigned m_flags : 21; - // The virtual register number (spill location) associated with this . + // The virtual register number (spill location) associated with this node. For tuples this is the offset into the graph's out of line tuple buffers. VirtualRegister m_virtualRegister; // The number of uses of the result of this operation (+1 for 'must generate' nodes, which have side-effects). unsigned m_refCount; diff --git a/Source/JavaScriptCore/dfg/DFGNodeType.h b/Source/JavaScriptCore/dfg/DFGNodeType.h index 1b744820ecdf..63cab11b88df 100644 --- a/Source/JavaScriptCore/dfg/DFGNodeType.h +++ b/Source/JavaScriptCore/dfg/DFGNodeType.h @@ -90,6 +90,7 @@ namespace JSC { namespace DFG { macro(Phi, 0) \ macro(Flush, NodeMustGenerate) \ macro(PhantomLocal, NodeMustGenerate) \ + macro(ExtractFromTuple, 0) \ \ /* Hint that this is where bytecode thinks is a good place to OSR. Note that this */\ /* will exist even in inlined loops. 
This has no execution semantics but it must */\ @@ -517,9 +518,7 @@ namespace JSC { namespace DFG { macro(HasIndexedProperty, NodeMustGenerate | NodeResultBoolean | NodeHasVarArgs) \ /* For-in enumeration opcodes */\ macro(GetPropertyEnumerator, NodeMustGenerate | NodeResultJS) \ - macro(EnumeratorNextUpdateIndexAndMode, NodeResultJS | NodeHasVarArgs) \ - macro(EnumeratorNextExtractMode, NodeResultInt32) \ - macro(EnumeratorNextExtractIndex, NodeResultInt32) \ + macro(EnumeratorNextUpdateIndexAndMode, NodeHasVarArgs) \ macro(EnumeratorNextUpdatePropertyName, NodeResultJS) \ macro(EnumeratorGetByVal, NodeResultJS | NodeHasVarArgs | NodeMustGenerate) \ macro(EnumeratorInByVal, NodeResultBoolean | NodeHasVarArgs | NodeMustGenerate) \ diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp index 790477f01f93..ca316e1bd6d2 100644 --- a/Source/JavaScriptCore/dfg/DFGOperations.cpp +++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp @@ -2353,7 +2353,7 @@ JSC_DEFINE_JIT_OPERATION(operationGetPropertyEnumeratorCell, JSCell*, (JSGlobalO RELEASE_AND_RETURN(scope, propertyNameEnumerator(globalObject, base)); } -JSC_DEFINE_JIT_OPERATION(operationEnumeratorNextUpdateIndexAndMode, EncodedJSValue, (JSGlobalObject* globalObject, EncodedJSValue baseValue, uint32_t index, int32_t modeNumber, JSPropertyNameEnumerator* enumerator)) +JSC_DEFINE_JIT_OPERATION(operationEnumeratorNextUpdateIndexAndMode, UGPRPair, (JSGlobalObject* globalObject, EncodedJSValue baseValue, uint32_t index, int32_t modeNumber, JSPropertyNameEnumerator* enumerator)) { VM& vm = globalObject->vm(); CallFrame* callFrame = DECLARE_CALL_FRAME(vm); @@ -2375,13 +2375,7 @@ JSC_DEFINE_JIT_OPERATION(operationEnumeratorNextUpdateIndexAndMode, EncodedJSVal RETURN_IF_EXCEPTION(scope, { }); } -#if USE(JSVALUE64) - JSValue result = bitwise_cast(static_cast(mode) << 32 | index | JSValue::DoubleEncodeOffset); -#else - JSValue result = JSValue(mode, index); -#endif - ASSERT(result.isDouble()); - return JSValue::encode(result); + return makeUGPRPair(index, static_cast(mode)); } JSC_DEFINE_JIT_OPERATION(operationEnumeratorNextUpdatePropertyName, JSString*, (JSGlobalObject* globalObject, uint32_t index, int32_t modeNumber, JSPropertyNameEnumerator* enumerator)) diff --git a/Source/JavaScriptCore/dfg/DFGOperations.h b/Source/JavaScriptCore/dfg/DFGOperations.h index b5c719de0ed2..f9d001ffe6bd 100644 --- a/Source/JavaScriptCore/dfg/DFGOperations.h +++ b/Source/JavaScriptCore/dfg/DFGOperations.h @@ -50,6 +50,20 @@ namespace DFG { struct OSRExitBase; + +#if USE(JSVALUE64) +struct UGPRPair { + // We carefully use UCPURegister so both parameters are returned in their own registers. 
+ UCPURegister first; + UCPURegister second; +}; +constexpr UGPRPair makeUGPRPair(UCPURegister first, UCPURegister second) { return { first, second }; } +#else +using UGPRPair = uint64_t; +constexpr UGPRPair makeUGPRPair(UCPURegister first, UCPURegister second) { return static_cast(second) << 32 | first; } +#endif + + JSC_DECLARE_JIT_OPERATION(operationStringFromCharCode, JSCell*, (JSGlobalObject*, int32_t)); JSC_DECLARE_JIT_OPERATION(operationStringFromCharCodeUntyped, EncodedJSValue, (JSGlobalObject*, EncodedJSValue)); @@ -118,7 +132,7 @@ JSC_DECLARE_JIT_OPERATION(operationHasIndexedProperty, size_t, (JSGlobalObject*, JSC_DECLARE_JIT_OPERATION(operationHasEnumerableIndexedProperty, size_t, (JSGlobalObject*, JSCell*, int32_t)); JSC_DECLARE_JIT_OPERATION(operationGetPropertyEnumerator, JSCell*, (JSGlobalObject*, EncodedJSValue)); JSC_DECLARE_JIT_OPERATION(operationGetPropertyEnumeratorCell, JSCell*, (JSGlobalObject*, JSCell*)); -JSC_DECLARE_JIT_OPERATION(operationEnumeratorNextUpdateIndexAndMode, EncodedJSValue, (JSGlobalObject*, EncodedJSValue, uint32_t, int32_t, JSPropertyNameEnumerator*)); +JSC_DECLARE_JIT_OPERATION(operationEnumeratorNextUpdateIndexAndMode, UGPRPair, (JSGlobalObject*, EncodedJSValue, uint32_t, int32_t, JSPropertyNameEnumerator*)); JSC_DECLARE_JIT_OPERATION(operationEnumeratorNextUpdatePropertyName, JSString*, (JSGlobalObject*, uint32_t, int32_t, JSPropertyNameEnumerator*)); JSC_DECLARE_JIT_OPERATION(operationEnumeratorInByVal, EncodedJSValue, (JSGlobalObject*, EncodedJSValue, EncodedJSValue, uint32_t, int32_t)); JSC_DECLARE_JIT_OPERATION(operationEnumeratorHasOwnProperty, EncodedJSValue, (JSGlobalObject*, EncodedJSValue, EncodedJSValue, uint32_t, int32_t)); diff --git a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp index 958f88cdb6df..c1ce58d1495b 100644 --- a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp @@ -42,7 +42,9 @@ class PredictionPropagationPhase : public Phase { public: PredictionPropagationPhase(Graph& graph) : Phase(graph, "prediction propagation") + , m_tupleSpeculations(graph.m_tupleData.size()) { + m_tupleSpeculations.fill(SpecNone); } bool run() @@ -128,6 +130,37 @@ class PredictionPropagationPhase : public Phase { return m_currentNode->predict(prediction); } + + bool setTuplePrediction(SpeculatedType prediction, unsigned index) + { + ASSERT(index < m_currentNode->tupleSize()); + + SpeculatedType& speculation = m_tupleSpeculations[m_currentNode->tupleOffset() + index]; + // setTuplePrediction() is used when we know that there is no way that we can change + // our minds about what the prediction is going to be. There is no semantic + // difference between setTuplePrediction() and mergeTupleSpeculation() other than the + // increased checking to validate this property. + ASSERT(speculation == SpecNone || speculation == prediction); + return mergeSpeculation(speculation, prediction); + } + + bool mergeTuplePrediction(SpeculatedType prediction, unsigned index) + { + ASSERT(index < m_currentNode->tupleSize()); + + SpeculatedType& speculation = m_tupleSpeculations[m_currentNode->tupleOffset() + index]; + return mergeSpeculation(speculation, prediction); + } + + template + bool setTuplePredictions(SpeculatedTypes... predictions) + { + unsigned index = 0; + bool updatedPrediction = false; + for (SpeculatedType prediction : { predictions... 
}) + updatedPrediction |= setTuplePrediction(prediction, index++); + return updatedPrediction; + } SpeculatedType speculatedDoubleTypeForPrediction(SpeculatedType value) { @@ -1250,9 +1283,9 @@ class PredictionPropagationPhase : public Phase { setPrediction(SpecObjectOther); break; - case EnumeratorNextExtractMode: - case EnumeratorNextExtractIndex: { - setPrediction(SpecInt32Only); + case ExtractFromTuple: { + // Use mergePrediction because ExtractFromTuple doesn't know if the prediction could change. + mergePrediction(m_tupleSpeculations[m_currentNode->tupleIndex()]); break; } @@ -1273,7 +1306,7 @@ class PredictionPropagationPhase : public Phase { } case EnumeratorNextUpdateIndexAndMode: { - setPrediction(SpecFullNumber); + setTuplePredictions(SpecInt32Only, SpecInt32Only); break; } @@ -1559,6 +1592,7 @@ class PredictionPropagationPhase : public Phase { } Vector m_dependentNodes; + Vector m_tupleSpeculations; Node* m_currentNode; bool m_changed { false }; PredictionPass m_pass { PrimaryPass }; // We use different logic for considering predictions depending on how far along we are in propagation. diff --git a/Source/JavaScriptCore/dfg/DFGSafeToExecute.h b/Source/JavaScriptCore/dfg/DFGSafeToExecute.h index 68d97cf66fc4..63adef4357a1 100644 --- a/Source/JavaScriptCore/dfg/DFGSafeToExecute.h +++ b/Source/JavaScriptCore/dfg/DFGSafeToExecute.h @@ -501,9 +501,7 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node, bool igno } case EnumeratorNextUpdateIndexAndMode: - // These technically don't have effects but they'll only ever follow a EnumeratorNextUpdateIndexAndMode so we might as well return false. - case EnumeratorNextExtractMode: - case EnumeratorNextExtractIndex: + case ExtractFromTuple: case EnumeratorNextUpdatePropertyName: case ToThis: case CreateThis: diff --git a/Source/JavaScriptCore/dfg/DFGScoreBoard.h b/Source/JavaScriptCore/dfg/DFGScoreBoard.h index 2bc4d759027f..efc7a93d0933 100644 --- a/Source/JavaScriptCore/dfg/DFGScoreBoard.h +++ b/Source/JavaScriptCore/dfg/DFGScoreBoard.h @@ -103,6 +103,7 @@ class ScoreBoard { if (!child) return; + ASSERT(!child->isTuple()); // Find the virtual register number for this child, increment its use count. uint32_t index = child->virtualRegister().toLocal(); ASSERT(m_used[index] != max()); diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp index 2a95ac1d5b4c..f346313c9491 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp @@ -14453,118 +14453,88 @@ void SpeculativeJIT::compileEnumeratorNextUpdateIndexAndMode(Node* node) GPRReg enumeratorGPR = enumerator.gpr(); if (node->enumeratorMetadata() == JSPropertyNameEnumerator::IndexedMode) { - JSValueRegsTemporary result(this); - GPRTemporary scratch(this, Reuse, index); - JSValueRegs resultRegs = result.regs(); + GPRTemporary newIndex(this, Reuse, index); + GPRTemporary scratch(this); speculationCheck(BadCache, JSValueSource(), node, branch32(NotEqual, Address(enumeratorGPR, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()), TrustedImm32(0))); Label incrementLoop; Jump done; compileHasIndexedProperty(node, operationHasEnumerableIndexedProperty, scopedLambda()>([&] { + GPRReg newIndexGPR = newIndex.gpr(); + GPRReg scratchGPR = scratch.gpr(); - move(indexGPR, scratch.gpr()); + // This should always be elided because index is UseDef in the bytecode for enumerator_next but we leave the move here for clarity. 
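// (newIndex was allocated with GPRTemporary(this, Reuse, index) above, so newIndexGPR is normally
// the same register as indexGPR and the move below folds away to nothing.)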
+ move(indexGPR, newIndexGPR); Jump initMode = branchTest32(Zero, modeGPR); incrementLoop = label(); - add32(TrustedImm32(1), scratch.gpr()); + add32(TrustedImm32(1), newIndexGPR); initMode.link(this); - done = branch32(AboveOrEqual, scratch.gpr(), Address(enumeratorGPR, JSPropertyNameEnumerator::indexedLengthOffset())); - return std::make_pair(scratch.gpr(), resultRegs.payloadGPR()); + done = branch32(AboveOrEqual, newIndexGPR, Address(enumeratorGPR, JSPropertyNameEnumerator::indexedLengthOffset())); + return std::make_pair(newIndexGPR, scratchGPR); })); - branchTest32(Zero, resultRegs.payloadGPR()).linkTo(incrementLoop, this); + branchTest32(Zero, scratch.gpr()).linkTo(incrementLoop, this); done.link(this); + if (m_graph.m_tupleData.at(node->tupleOffset() + 1).refCount) + move(TrustedImm32(static_cast(JSPropertyNameEnumerator::IndexedMode)), scratch.gpr()); -#if USE(JSVALUE64) - move(TrustedImm64(JSValue::DoubleEncodeOffset | static_cast(JSPropertyNameEnumerator::IndexedMode) << 32), resultRegs.payloadGPR()); - or64(scratch.gpr(), resultRegs.payloadGPR()); -#else - move(TrustedImm32(JSPropertyNameEnumerator::IndexedMode), resultRegs.tagGPR()); - move(scratch.gpr(), resultRegs.payloadGPR()); -#endif - - jsValueResult(resultRegs, node); + useChildren(node); + strictInt32TupleResultWithoutUsingChildren(newIndex.gpr(), node, 0); + strictInt32TupleResultWithoutUsingChildren(scratch.gpr(), node, 1); return; } if (node->enumeratorMetadata() == JSPropertyNameEnumerator::OwnStructureMode && baseEdge.useKind() == CellUse) { SpeculateCellOperand base(this, baseEdge); - JSValueRegsTemporary result(this); + GPRTemporary newIndex(this); + GPRTemporary newMode(this, Reuse, mode); GPRReg baseGPR = base.gpr(); - JSValueRegs resultRegs = result.regs(); + // Has the same structure as the enumerator. 
- load32(Address(baseGPR, JSCell::structureIDOffset()), resultRegs.payloadGPR()); - speculationCheck(BadCache, JSValueSource(), node, branch32(NotEqual, resultRegs.payloadGPR(), Address(enumeratorGPR, JSPropertyNameEnumerator::cachedStructureIDOffset()))); + load32(Address(baseGPR, JSCell::structureIDOffset()), newIndex.gpr()); + speculationCheck(BadCache, JSValueSource(), node, branch32(NotEqual, newIndex.gpr(), Address(enumeratorGPR, JSPropertyNameEnumerator::cachedStructureIDOffset()))); - load32(Address(enumeratorGPR, JSPropertyNameEnumerator::flagsOffset()), resultRegs.payloadGPR()); - and32(TrustedImm32(JSPropertyNameEnumerator::enumerationModeMask), resultRegs.payloadGPR()); - speculationCheck(BadCache, JSValueSource(), node, branch32(NotEqual, TrustedImm32(JSPropertyNameEnumerator::OwnStructureMode), resultRegs.payloadGPR())); + load32(Address(enumeratorGPR, JSPropertyNameEnumerator::flagsOffset()), newIndex.gpr()); + and32(TrustedImm32(JSPropertyNameEnumerator::enumerationModeMask), newIndex.gpr()); + speculationCheck(BadCache, JSValueSource(), node, branch32(NotEqual, TrustedImm32(JSPropertyNameEnumerator::OwnStructureMode), newIndex.gpr())); - move(indexGPR, resultRegs.payloadGPR()); + move(indexGPR, newIndex.gpr()); Jump initMode = branchTest32(Zero, modeGPR); - add32(TrustedImm32(1), resultRegs.payloadGPR()); + add32(TrustedImm32(1), newIndex.gpr()); initMode.link(this); -#if USE(JSVALUE64) - or64(TrustedImm64(JSValue::DoubleEncodeOffset | static_cast(JSPropertyNameEnumerator::OwnStructureMode) << 32), resultRegs.payloadGPR()); -#else - move(TrustedImm32(JSPropertyNameEnumerator::OwnStructureMode), resultRegs.tagGPR()); -#endif - jsValueResult(resultRegs, node); + if (m_graph.m_tupleData.at(node->tupleOffset() + 1).refCount) + move(TrustedImm32(static_cast(JSPropertyNameEnumerator::OwnStructureMode)), newMode.gpr()); + + useChildren(node); + strictInt32TupleResultWithoutUsingChildren(newIndex.gpr(), node, 0); + strictInt32TupleResultWithoutUsingChildren(newMode.gpr(), node, 1); return; } JSValueOperand base(this, baseEdge); +#if USE(JSVALUE64) + GPRTemporary newMode(this, Reuse, mode); +#endif JSValueRegs baseRegs = base.regs(); + flushRegisters(); - JSValueRegsFlushedCallResult result(this); - JSValueRegs resultRegs = result.regs(); - callOperation(operationEnumeratorNextUpdateIndexAndMode, resultRegs, LinkableConstant::globalObject(*this, node), baseRegs, indexGPR, modeGPR, enumeratorGPR); + GPRFlushedCallResult indexResult(this); + GPRFlushedCallResult2 modeResult(this); + setupArguments(LinkableConstant::globalObject(*this, node), baseRegs, indexGPR, modeGPR, enumeratorGPR); + appendCallSetResult(operationEnumeratorNextUpdateIndexAndMode, indexResult.gpr(), modeResult.gpr()); exceptionCheck(); - jsValueResult(resultRegs, node); -} - -void SpeculativeJIT::compileEnumeratorNextExtractIndex(Node* node) -{ - JSValueOperand updatedPair(this, node->child1()); - JSValueRegs pairRegs = updatedPair.jsValueRegs(); - GPRReg payloadGPR = pairRegs.payloadGPR(); - - GPRTemporary result(this); - GPRReg resultGPR = result.gpr(); - and32(TrustedImm32(std::numeric_limits::max()), payloadGPR, resultGPR); - - strictInt32Result(resultGPR, node); -} - -void SpeculativeJIT::compileEnumeratorNextExtractMode(Node* node) -{ - JSValueOperand updatedPair(this, node->child1()); - JSValueRegs pairRegs = updatedPair.jsValueRegs(); - GPRReg pairGPR = is64Bit() ? 
pairRegs.payloadGPR() : pairRegs.tagGPR(); - - GPRTemporary result(this); - GPRReg resultGPR = result.gpr(); - -#if CPU(ARM64) && USE(JSVALUE64) - extractUnsignedBitfield64(pairGPR, TrustedImm32(32), TrustedImm32(WTF::fastLog2(static_cast(JSPropertyNameEnumerator::enumerationModeMask + 1))), resultGPR); -#else -#if USE(JSVALUE64) - rshift64(pairGPR, TrustedImm32(32), resultGPR); -#else - move(pairGPR, resultGPR); -#endif - and32(TrustedImm32(JSPropertyNameEnumerator::enumerationModeMask), resultGPR); -#endif - - strictInt32Result(resultGPR, node); + useChildren(node); + strictInt32TupleResultWithoutUsingChildren(indexResult.gpr(), node, 0); + strictInt32TupleResultWithoutUsingChildren(modeResult.gpr(), node, 1); } void SpeculativeJIT::compileEnumeratorNextUpdatePropertyName(Node* node) @@ -16162,6 +16132,42 @@ void SpeculativeJIT::compileIdentity(Node* node) } } +void SpeculativeJIT::compileExtractFromTuple(Node* node) +{ + RELEASE_ASSERT(node->child1().useKind() == UntypedUse); + + ASSERT(m_graph.m_tupleData.at(node->tupleIndex()).virtualRegister == node->virtualRegister()); + VirtualRegister virtualRegister = node->virtualRegister(); + GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); +#if ASSERT_ENABLED + ASSERT(m_graph.m_tupleData.at(node->tupleIndex()).resultFlags == node->result()); + switch (node->result()) { + case NodeResultJS: + case NodeResultNumber: + ASSERT(info.isFormat(DataFormatJS)); + break; + case NodeResultDouble: + ASSERT(info.isFormat(DataFormatDouble) || info.isFormat(DataFormatJSDouble)); + break; + case NodeResultInt32: + ASSERT(info.isFormat(DataFormatInt32) || info.isFormat(DataFormatJSInt32)); + break; + case NodeResultBoolean: + ASSERT(info.isFormat(DataFormatBoolean) || info.isFormat(DataFormatJSBoolean)); + break; + case NodeResultStorage: + ASSERT(info.isFormat(DataFormatStorage)); + break; + + // FIXME: These are not supported because it wasn't exactly clear how to implement them and they are not currently used. 
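// (Supporting NodeResultInt52 here would presumably mean accepting DataFormatInt52 or
// DataFormatStrictInt52, mirroring the format checks above; that is left as a guess.)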
+ case NodeResultInt52: + default: + RELEASE_ASSERT_NOT_REACHED(); + } +#endif + info.initFromTupleResult(node); +} + void SpeculativeJIT::compileMiscStrictEq(Node* node) { JSValueOperand op1(this, node->child1(), ManualOperandSpeculation); diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h index da49983cc671..6f54e48c9d44 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h @@ -907,6 +907,32 @@ class SpeculativeJIT : public JITCompiler { generationInfo(node).initConstant(node, node->refCount()); } + void strictInt32TupleResultWithoutUsingChildren(GPRReg reg, Node* node, unsigned index, DataFormat format = DataFormatInt32) + { + ASSERT(index < node->tupleSize()); + unsigned refCount = m_graph.m_tupleData.at(node->tupleOffset() + index).refCount; + if (!refCount) + return; + ASSERT(refCount == 1); + VirtualRegister virtualRegister = m_graph.m_tupleData.at(node->tupleOffset() + index).virtualRegister; + GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); + + if (format == DataFormatInt32) { + jitAssertIsInt32(reg); + m_gprs.retain(reg, virtualRegister, SpillOrderInteger); + info.initInt32(node, refCount, reg); + } else { +#if USE(JSVALUE64) + RELEASE_ASSERT(format == DataFormatJSInt32); + jitAssertIsJSInt32(reg); + m_gprs.retain(reg, virtualRegister, SpillOrderJS); + info.initJSValue(node, refCount, reg, format); +#elif USE(JSVALUE32_64) + RELEASE_ASSERT_NOT_REACHED(); +#endif + } + } + template std::enable_if_t< FunctionTraits::hasResult, @@ -1515,9 +1541,8 @@ class SpeculativeJIT : public JITCompiler { void compileThrow(Node*); void compileThrowStaticError(Node*); + void compileExtractFromTuple(Node*); void compileEnumeratorNextUpdateIndexAndMode(Node*); - void compileEnumeratorNextExtractMode(Node*); - void compileEnumeratorNextExtractIndex(Node*); void compileEnumeratorNextUpdatePropertyName(Node*); void compileEnumeratorGetByVal(Node*); template @@ -2237,7 +2262,6 @@ class GPRFlushedCallResult : public GPRTemporary { } }; -#if USE(JSVALUE32_64) class GPRFlushedCallResult2 : public GPRTemporary { public: GPRFlushedCallResult2(SpeculativeJIT* jit) @@ -2245,7 +2269,6 @@ class GPRFlushedCallResult2 : public GPRTemporary { { } }; -#endif class FPRResult : public FPRTemporary { public: diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp index 3d51d23c3bdc..8cb75efd7a69 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp @@ -2161,6 +2161,11 @@ void SpeculativeJIT::compile(Node* node) break; } + case ExtractFromTuple: { + compileExtractFromTuple(node); + break; + } + case Inc: case Dec: compileIncOrDec(node); @@ -4041,16 +4046,6 @@ void SpeculativeJIT::compile(Node* node) break; } - case EnumeratorNextExtractMode: { - compileEnumeratorNextExtractMode(node); - break; - } - - case EnumeratorNextExtractIndex: { - compileEnumeratorNextExtractIndex(node); - break; - } - case EnumeratorNextUpdatePropertyName: { compileEnumeratorNextUpdatePropertyName(node); break; diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp index d20cbd1dab11..54434ae99ad6 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp @@ -3036,6 +3036,11 @@ void SpeculativeJIT::compile(Node* node) break; } + case ExtractFromTuple: { + 
compileExtractFromTuple(node); + break; + } + case Inc: case Dec: compileIncOrDec(node); @@ -5596,16 +5601,6 @@ void SpeculativeJIT::compile(Node* node) break; } - case EnumeratorNextExtractMode: { - compileEnumeratorNextExtractMode(node); - break; - } - - case EnumeratorNextExtractIndex: { - compileEnumeratorNextExtractIndex(node); - break; - } - case EnumeratorNextUpdatePropertyName: { compileEnumeratorNextUpdatePropertyName(node); break; diff --git a/Source/JavaScriptCore/dfg/DFGValidate.cpp b/Source/JavaScriptCore/dfg/DFGValidate.cpp index e1ac9a955ea2..88320dcd0cd8 100644 --- a/Source/JavaScriptCore/dfg/DFGValidate.cpp +++ b/Source/JavaScriptCore/dfg/DFGValidate.cpp @@ -47,7 +47,9 @@ class Validate { : m_graph(graph) , m_graphDumpMode(graphDumpMode) , m_graphDumpBeforePhase(graphDumpBeforePhase) + , m_myTupleRefCounts(m_graph.m_tupleData.size()) { + m_myTupleRefCounts.fill(0); } #define VALIDATE(context, assertion) do { \ @@ -138,6 +140,14 @@ class Validate { m_myRefCounts.find(edge.node())->value++; + if (node->op() == ExtractFromTuple) { + VALIDATE((node, edge), edge->isTuple()); + VALIDATE((node, edge), node->child1() == edge); + m_myTupleRefCounts.at(node->tupleIndex())++; + // Tuples edges don't obey the normal hasResult() rules for nodes so skip that logic below. + continue; + } + validateEdgeWithDoubleResultIfNecessary(node, edge); validateEdgeWithInt52ResultIfNecessary(node, edge); @@ -179,8 +189,13 @@ class Validate { continue; for (size_t i = 0; i < block->numNodes(); ++i) { Node* node = block->node(i); - if (m_graph.m_refCountState == ExactRefCount) + if (m_graph.m_refCountState == ExactRefCount) { V_EQUAL((node), m_myRefCounts.get(node), node->adjustedRefCount()); + if (node->isTuple()) { + for (unsigned j = 0; j < node->tupleSize(); ++j) + V_EQUAL((node), m_myTupleRefCounts.at(node->tupleOffset() + j), m_graph.m_tupleData.at(node->tupleOffset() + j).refCount); + } + } } bool foundTerminal = false; @@ -488,13 +503,6 @@ class Validate { } private: - Graph& m_graph; - GraphDumpMode m_graphDumpMode; - CString m_graphDumpBeforePhase; - - HashMap m_myRefCounts; - HashSet m_acceptableNodes; - void validateCPS() { VALIDATE((), !m_graph.m_rootToArguments.isEmpty()); // We should have at least one root. @@ -1086,6 +1094,14 @@ class Validate { dataLog("At time of failure:\n"); m_graph.dump(); } + + Graph& m_graph; + GraphDumpMode m_graphDumpMode; + CString m_graphDumpBeforePhase; + + HashMap m_myRefCounts; + Vector m_myTupleRefCounts; + HashSet m_acceptableNodes; }; } // End anonymous namespace. 
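To make the bookkeeping in the validator above and the register-allocation change below easier to follow, here is a minimal standalone sketch of the per-element side buffer they both walk. The field names (refCount, resultFlags, virtualRegister) and the tupleOffset + index addressing mirror the patch; the TupleData and MockTupleNode types, allocateTupleRegisters() and the plain integer register counter are invented for the example and are not JSC types.

    #include <cassert>
    #include <vector>

    // Simplified stand-ins for the DFG types; only the fields the tuple
    // bookkeeping touches are modelled.
    struct TupleData {
        unsigned refCount { 0 };     // surviving ExtractFromTuple uses of this element
        unsigned resultFlags { 0 };  // NodeResult* format of the element (unused here)
        int virtualRegister { -1 };  // assigned only when refCount is non-zero
    };

    struct MockTupleNode {
        unsigned tupleOffset;        // first slot in the side buffer
        unsigned tupleSize;          // number of elements the node produces
    };

    // Same shape as the VirtualRegisterAllocationPhase change below: walk the
    // node's slice of the side buffer and hand out a register per live element.
    void allocateTupleRegisters(const MockTupleNode& node, std::vector<TupleData>& tupleData, int& nextRegister)
    {
        for (unsigned i = 0; i < node.tupleSize; ++i) {
            TupleData& data = tupleData.at(node.tupleOffset + i);
            if (data.refCount)
                data.virtualRegister = nextRegister++;
        }
    }

    int main()
    {
        std::vector<TupleData> tupleData(2);
        tupleData[0].refCount = 1; // the <<0 extract is still alive
        tupleData[1].refCount = 0; // the <<1 extract was eliminated

        MockTupleNode node { 0, 2 };
        int nextRegister = 0;
        allocateTupleRegisters(node, tupleData, nextRegister);

        assert(tupleData[0].virtualRegister == 0);
        assert(tupleData[1].virtualRegister == -1); // nothing allocated, so nothing can leak
        return 0;
    }

The validator's m_myTupleRefCounts plays the counting role here: it tallies one count per ExtractFromTuple edge and compares the tally against refCount, which is the same quantity the allocator tests before handing out a register.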
diff --git a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp index a28ac99a07da..15b364456112 100644 --- a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp @@ -69,6 +69,13 @@ class VirtualRegisterAllocationPhase : public Phase { case GetLocal: ASSERT(!node->child1()->hasResult()); break; + + case ExtractFromTuple: + ASSERT(m_graph.m_tupleData.at(node->tupleIndex()).refCount == 1); + node->setVirtualRegister(m_graph.m_tupleData.at(node->tupleIndex()).virtualRegister); + ASSERT(!node->mustGenerate()); + continue; + default: break; } @@ -86,6 +93,17 @@ class VirtualRegisterAllocationPhase : public Phase { scoreBoard.useIfHasResult(node->child3()); } + if (node->isTuple()) { + ASSERT(node->refCount() <= node->tupleSize()); + for (unsigned i = 0; i < node->tupleSize(); ++i) { + auto& tupleData = m_graph.m_tupleData.at(node->tupleOffset() + i); + if (tupleData.refCount) + tupleData.virtualRegister = scoreBoard.allocate(); + } + ASSERT(!node->hasResult()); + continue; + } + if (!node->hasResult()) continue; diff --git a/Source/JavaScriptCore/ftl/FTLCapabilities.cpp b/Source/JavaScriptCore/ftl/FTLCapabilities.cpp index 5df1a6e8a63d..b1da7a560513 100644 --- a/Source/JavaScriptCore/ftl/FTLCapabilities.cpp +++ b/Source/JavaScriptCore/ftl/FTLCapabilities.cpp @@ -55,6 +55,7 @@ inline CapabilityLevel canCompile(Node* node) case Phantom: case Flush: case PhantomLocal: + case ExtractFromTuple: case SetArgumentDefinitely: case SetArgumentMaybe: case Return: @@ -299,8 +300,6 @@ inline CapabilityLevel canCompile(Node* node) case ResolveRope: case GetPropertyEnumerator: case EnumeratorNextUpdateIndexAndMode: - case EnumeratorNextExtractMode: - case EnumeratorNextExtractIndex: case EnumeratorNextUpdatePropertyName: case EnumeratorGetByVal: case EnumeratorInByVal: diff --git a/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp index 3eff17797a23..5faf047a750e 100644 --- a/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp +++ b/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp @@ -159,6 +159,7 @@ class LowerDFGToB3 { , m_ftlState(state) , m_out(state) , m_proc(*state.proc) + , m_tupleValues(m_graph.m_tupleData.size()) , m_availabilityCalculator(m_graph) , m_state(state.graph) , m_interpreter(state.graph, m_state) @@ -736,6 +737,9 @@ class LowerDFGToB3 { case DFG::Phi: compilePhi(); break; + case ExtractFromTuple: + compileExtractFromTuple(); + break; case JSConstant: break; case DoubleConstant: @@ -1572,12 +1576,6 @@ class LowerDFGToB3 { case EnumeratorNextUpdateIndexAndMode: compileEnumeratorNextUpdateIndexAndMode(); break; - case EnumeratorNextExtractIndex: - compileEnumeratorNextExtractIndex(); - break; - case EnumeratorNextExtractMode: - compileEnumeratorNextExtractMode(); - break; case EnumeratorNextUpdatePropertyName: compileEnumeratorNextUpdatePropertyName(); break; @@ -14228,7 +14226,8 @@ IGNORE_CLANG_WARNINGS_END m_out.appendTo(continuation); LValue finalIndex = m_out.phi(Int32, finalIncrementedIndex, finalPropertyIndex); - setJSValue(m_out.bitOr(m_out.zeroExt(finalIndex, Int64), m_out.constInt64(JSValue::NumberTag | static_cast(JSPropertyNameEnumerator::IndexedMode) << 32))); + setTuple(0, finalIndex); + setTuple(1, m_out.constInt32(static_cast(JSPropertyNameEnumerator::IndexedMode))); return; } @@ -14251,27 +14250,47 @@ IGNORE_CLANG_WARNINGS_END m_out.appendTo(continuation); index = m_out.phi(Int32, 
initialIndex, incrementedIndex); - setJSValue(m_out.bitOr(m_out.zeroExt(index, Int64), m_out.constInt64(JSValue::DoubleEncodeOffset | static_cast(JSPropertyNameEnumerator::OwnStructureMode) << 32))); + setTuple(0, index); + setTuple(1, m_out.constInt32(static_cast(JSPropertyNameEnumerator::OwnStructureMode))); return; } LValue base = lowJSValue(baseEdge); - setJSValue(vmCall(Int64, operationEnumeratorNextUpdateIndexAndMode, weakPointer(globalObject), base, index, mode, enumerator)); - } + LValue tuple = vmCall(registerPair(), operationEnumeratorNextUpdateIndexAndMode, weakPointer(globalObject), base, index, mode, enumerator); - void compileEnumeratorNextExtractIndex() - { - LValue boxedPair = lowJSValue(m_node->child1()); - - setInt32(m_out.castToInt32(boxedPair)); + setTuple(0, m_out.castToInt32(m_out.extract(tuple, 0))); + setTuple(1, m_out.castToInt32(m_out.extract(tuple, 1))); } - void compileEnumeratorNextExtractMode() + void compileExtractFromTuple() { - LValue boxedPair = lowJSValue(m_node->child1()); + auto& loweredNodeValue = m_tupleValues.at(m_node->tupleIndex()); + ASSERT(isValid(loweredNodeValue)); + LValue result = loweredNodeValue.value(); - LValue highBits = m_out.castToInt32(m_out.lShr(boxedPair, m_out.constInt32(32))); - setInt32(m_out.bitAnd(highBits, m_out.constInt32(JSPropertyNameEnumerator::enumerationModeMask))); + switch (m_graph.m_tupleData.at(m_node->tupleIndex()).resultFlags) { + case NodeResultJS: + case NodeResultNumber: + setJSValue(result); + break; + case NodeResultDouble: + setDouble(result); + break; + case NodeResultInt32: + setInt32(result); + break; + case NodeResultBoolean: + setBoolean(result); + break; + case NodeResultStorage: + setStorage(result); + break; + + // FIXME: These are not supported because it wasn't exactly clear how to implement them and they are not currently used. + case NodeResultInt52: + default: + RELEASE_ASSERT_NOT_REACHED(); + } } // FIXME: We should probably have a method of value recovery for this node since it's "effect" free but always live in bytecode. 
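The next hunk adds the setTuple() helpers and the m_tupleValues table that compileExtractFromTuple() above reads back from. As a rough standalone model of that flow, under the assumption that only the tupleOffset + index slot addressing is taken from the patch (TupleSideTable, TupleProducer and the int64_t payload are invented stand-ins, not FTL types):

    #include <cassert>
    #include <cstdint>
    #include <optional>
    #include <vector>

    // Invented stand-in for a lowered value; the real table stores LoweredNodeValue.
    using LoweredValue = std::optional<int64_t>;

    struct TupleProducer {
        unsigned tupleOffset; // first slot owned by this node
        unsigned tupleSize;   // number of elements it lowers
    };

    class TupleSideTable {
    public:
        explicit TupleSideTable(size_t totalTupleSlots)
            : m_values(totalTupleSlots) { }

        // Analogous to setTuple(node, index, value): the producing node records
        // one lowered value per element it defines.
        void set(const TupleProducer& node, unsigned index, int64_t value)
        {
            assert(index < node.tupleSize);
            m_values.at(node.tupleOffset + index) = value;
        }

        // Analogous to compileExtractFromTuple(): an extract is a lookup of the
        // value its producer already lowered; no new code is emitted for it.
        int64_t extract(const TupleProducer& node, unsigned index) const
        {
            const LoweredValue& value = m_values.at(node.tupleOffset + index);
            assert(value.has_value());
            return *value;
        }

    private:
        std::vector<LoweredValue> m_values;
    };

    int main()
    {
        TupleSideTable table(2);
        TupleProducer producer { 0, 2 };

        table.set(producer, 0, 42); // e.g. the updated index
        table.set(producer, 1, 1);  // e.g. the enumeration mode

        assert(table.extract(producer, 0) == 42);
        assert(table.extract(producer, 1) == 1);
        return 0;
    }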
@@ -21645,6 +21664,11 @@ IGNORE_CLANG_WARNINGS_END { m_doubleValues.set(node, LoweredNodeValue(value, m_highBlock)); } + void setTuple(Node* tuple, unsigned index, LValue value) + { + ASSERT(index < tuple->tupleSize()); + m_tupleValues.at(tuple->tupleOffset() + index) = LoweredNodeValue(value, m_highBlock); + } void setInt32(LValue value) { @@ -21678,6 +21702,10 @@ IGNORE_CLANG_WARNINGS_END { setDouble(m_node, value); } + void setTuple(unsigned index, LValue value) + { + setTuple(m_node, index, value); + } bool isValid(const LoweredNodeValue& value) { @@ -21831,6 +21859,13 @@ IGNORE_CLANG_WARNINGS_END return abstractStructure(edge.node()); } + LType registerPair() + { + if (!m_registerPair.isTuple()) + m_registerPair = m_proc.addTuple({ registerType(), registerType() }); + return m_registerPair; + } + void crash() { crash(m_highBlock, m_node); @@ -21917,6 +21952,8 @@ IGNORE_CLANG_WARNINGS_END HashMap m_storageValues; HashMap m_doubleValues; + Vector m_tupleValues; + HashMap m_phis; LocalOSRAvailabilityCalculator m_availabilityCalculator; @@ -21935,6 +21972,8 @@ IGNORE_CLANG_WARNINGS_END HashMap m_liveInToNode; HashMap m_aiCheckedNodes; String m_graphDump; + + LType m_registerPair; }; } // anonymous namespace diff --git a/Source/JavaScriptCore/ftl/FTLOutput.cpp b/Source/JavaScriptCore/ftl/FTLOutput.cpp index a82632f520ff..d865730b9dab 100644 --- a/Source/JavaScriptCore/ftl/FTLOutput.cpp +++ b/Source/JavaScriptCore/ftl/FTLOutput.cpp @@ -128,6 +128,11 @@ LValue Output::opaque(LValue value) return m_block->appendNew(m_proc, Opaque, origin(), value); } +LValue Output::extract(LValue value, unsigned index) +{ + return m_block->appendNew(m_proc, origin(), m_proc.typeAtOffset(value->type(), index), value, index); +} + LValue Output::add(LValue left, LValue right) { if (Value* result = left->addConstant(m_proc, right)) { diff --git a/Source/JavaScriptCore/ftl/FTLOutput.h b/Source/JavaScriptCore/ftl/FTLOutput.h index 9f430fbb0c80..8249fe7dc03e 100644 --- a/Source/JavaScriptCore/ftl/FTLOutput.h +++ b/Source/JavaScriptCore/ftl/FTLOutput.h @@ -149,6 +149,7 @@ class Output : public CommonValues { void addIncomingToPhiIfSet(LValue phi, Params... theRest); LValue opaque(LValue); + LValue extract(LValue tuple, unsigned index); LValue add(LValue, LValue); LValue sub(LValue, LValue); diff --git a/Source/JavaScriptCore/wasm/WasmAirIRGeneratorBase.h b/Source/JavaScriptCore/wasm/WasmAirIRGeneratorBase.h index f6e7c7b51a22..9ad5bb6c82cc 100644 --- a/Source/JavaScriptCore/wasm/WasmAirIRGeneratorBase.h +++ b/Source/JavaScriptCore/wasm/WasmAirIRGeneratorBase.h @@ -30,6 +30,7 @@ #if ENABLE(WEBASSEMBLY_B3JIT) +#include "AirCCallSpecial.h" #include "AirCode.h" #include "AirGenerate.h" #include "AirHelpers.h" @@ -834,7 +835,7 @@ struct AirIRGeneratorBase { B3::Value* dummyFunc = m_proc.addConstant(B3::Origin(), B3::pointerType(), bitwise_cast(func)); B3::Value* origin = m_proc.add(resultType, B3::Origin(), B3::Effects::none(), dummyFunc, makeDummyValue(theArgs)...); - Inst inst(CCall, origin); + Inst inst(CCall, origin, Arg::special(m_proc.code().cCallSpecial())); auto callee = self().gPtr(); append(block, Move, Arg::immPtr(tagCFunctionPtr(func)), callee);
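For reference, a small illustration of the two-register return shape that the UGPRPair operation result and the { pointerType(), pointerType() } CCall tuple both rely on. PairResult, makePair(), makePacked() and nextIndexAndMode() are invented stand-ins written in the spirit of the patch, not JSC API.

    #include <cassert>
    #include <cstdint>

    // Mirrors the 64-bit shape of UGPRPair: two pointer-sized integer members.
    // Under the SysV AMD64 ABI a trivially-copyable struct like this is returned
    // in rax:rdx, and under AAPCS64 in x0:x1, so each member arrives in its own
    // GPR with no boxing step in between.
    struct PairResult {
        uintptr_t first;
        uintptr_t second;
    };

    constexpr PairResult makePair(uintptr_t first, uintptr_t second) { return { first, second }; }

    // Invented example standing in for an operation that produces two results,
    // in the spirit of operationEnumeratorNextUpdateIndexAndMode's (index, mode).
    PairResult nextIndexAndMode(uintptr_t index, uintptr_t mode)
    {
        return makePair(index + 1, mode);
    }

    // On 32-bit targets the patch packs both halves into one uint64_t instead,
    // which the common 32-bit conventions already return across two registers.
    constexpr uint64_t makePacked(uint32_t first, uint32_t second)
    {
        return static_cast<uint64_t>(second) << 32 | first;
    }

    int main()
    {
        PairResult result = nextIndexAndMode(7, 1);
        assert(result.first == 8 && result.second == 1);

        uint64_t packed = makePacked(8, 1);
        assert(static_cast<uint32_t>(packed) == 8 && packed >> 32 == 1);
        return 0;
    }

Compared with the removed DoubleEncodeOffset packing, neither half needs to be unboxed after the call: the GPRFlushedCallResult / GPRFlushedCallResult2 pair in the DFG and the two m_out.extract() calls in the FTL each pick up one ready-to-use strict Int32.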