Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
[JSC] Integrate inlined megamorphic access in DFG and FTL
https://bugs.webkit.org/show_bug.cgi?id=255821
rdar://108398043

Reviewed by Mark Lam.

DFG and FTL should get Baseline IC's megamorphic GetById state, and emit special GetByIdMegamorphic node, which
does megamorphic access inline (without IC) from the beginning. This is (1) faster than IC and (2) avoids repeated
repatching of code.
Here is a subtle point: emitting GetByIdMegamorphic means that we give up the polymorphic IC optimization, so this needs very careful handling.
It is possible that one function can be inlined into another function, and then it sees only a limited number of structures.
In this case, continuing to use the IC is better than falling back to the megamorphic case. But if the function was compiled before,
and even the optimizing JIT saw the megamorphism, then it is likely that this function will continue having megamorphic behavior,
and inlined megamorphic code is faster. Currently, we use GetByIdMegamorphic only when the exact same form of CodeOrigin got
into this megamorphic state before (same level of inlining etc.). This is very conservative but effective, since the IC is very fast
when it works well (but costly if it does not work and becomes megamorphic).
Once this cost-benefit tradeoff gets changed (via handler IC), we can revisit this condition.

                                           ToT                     Patched

    megamorphic-own-load             37.0244+-0.1000     ^     34.3635+-0.0982        ^ definitely 1.0774x faster
    megamorphic-dfg                   7.4125+-0.0400            7.3945+-0.0251
    megamorphic-load                  4.5447+-0.0232     ^      4.3989+-0.0293        ^ definitely 1.0332x faster
    megamorphic-prototype-load       37.0116+-0.1119     ^     34.4312+-0.1764        ^ definitely 1.0749x faster
    megamorphic-miss                 30.6568+-0.0471     ^     28.5222+-0.1031        ^ definitely 1.0748x faster

* Source/JavaScriptCore/bytecode/GetByStatus.cpp:
(JSC::GetByStatus::computeFor):
(JSC::GetByStatus::GetByStatus):
(JSC::isSameStyledCodeOrigin):
(JSC::GetByStatus::computeForStubInfoWithoutExitSiteFeedback):
(JSC::GetByStatus::makesCalls const):
(JSC::GetByStatus::merge):
(JSC::GetByStatus::dump const):
* Source/JavaScriptCore/bytecode/GetByStatus.h:
* Source/JavaScriptCore/bytecode/InlineCacheCompiler.cpp:
(JSC::InlineCacheCompiler::generateWithGuard):
* Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h:
(JSC::DFG::AbstractInterpreter<AbstractStateType>::executeEffects):
* Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp:
(JSC::DFG::ByteCodeParser::handleGetById):
* Source/JavaScriptCore/dfg/DFGClobberize.h:
(JSC::DFG::clobberize):
* Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp:
(JSC::DFG::ConstantFoldingPhase::foldConstants):
* Source/JavaScriptCore/dfg/DFGDoesGC.cpp:
(JSC::DFG::doesGC):
* Source/JavaScriptCore/dfg/DFGFixupPhase.cpp:
(JSC::DFG::FixupPhase::fixupNode):
* Source/JavaScriptCore/dfg/DFGNode.h:
(JSC::DFG::Node::convertToGetByOffset):
(JSC::DFG::Node::convertToMultiGetByOffset):
(JSC::DFG::Node::hasCacheableIdentifier):
(JSC::DFG::Node::hasHeapPrediction):
* Source/JavaScriptCore/dfg/DFGNodeType.h:
* Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp:
* Source/JavaScriptCore/dfg/DFGSafeToExecute.h:
(JSC::DFG::safeToExecute):
* Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h:
* Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp:
(JSC::DFG::SpeculativeJIT::compile):
* Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::compile):
(JSC::DFG::SpeculativeJIT::compileGetByIdMegamorphic):
* Source/JavaScriptCore/ftl/FTLCapabilities.cpp:
(JSC::FTL::canCompile):
* Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp:
(JSC::FTL::DFG::LowerDFGToB3::compileNode):
(JSC::FTL::DFG::LowerDFGToB3::compileGetByIdMegamorphic):
* Source/JavaScriptCore/jit/AssemblyHelpers.cpp:
(JSC::AssemblyHelpers::loadMegamorphicProperty):
* Source/JavaScriptCore/jit/AssemblyHelpers.h:
* Source/JavaScriptCore/jit/JITOperations.cpp:
(JSC::JSC_DEFINE_JIT_OPERATION):

Canonical link: https://commits.webkit.org/263300@main
  • Loading branch information
Constellation committed Apr 24, 2023
1 parent 9738436 commit 98d5e19
Show file tree
Hide file tree
Showing 21 changed files with 262 additions and 85 deletions.
64 changes: 52 additions & 12 deletions Source/JavaScriptCore/bytecode/GetByStatus.cpp
Expand Up @@ -33,6 +33,7 @@
#include "GetterSetterAccessCase.h"
#include "ICStatusUtils.h"
#include "InlineCacheCompiler.h"
#include "InlineCallFrame.h"
#include "IntrinsicGetterAccessCase.h"
#include "ModuleNamespaceAccessCase.h"
#include "ProxyObjectAccessCase.h"
Expand Down Expand Up @@ -147,15 +148,14 @@ GetByStatus GetByStatus::computeFromLLInt(CodeBlock* profiledBlock, BytecodeInde
return result;
}

GetByStatus GetByStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map, BytecodeIndex bytecodeIndex, ExitFlag didExit, CallLinkStatus::ExitSiteData callExitSiteData)
GetByStatus GetByStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map, ExitFlag didExit, CallLinkStatus::ExitSiteData callExitSiteData, CodeOrigin codeOrigin)
{
ConcurrentJSLocker locker(profiledBlock->m_lock);

GetByStatus result;

#if ENABLE(DFG_JIT)
result = computeForStubInfoWithoutExitSiteFeedback(
locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)).stubInfo, callExitSiteData);
result = computeForStubInfoWithoutExitSiteFeedback(locker, profiledBlock, map.get(CodeOrigin(codeOrigin.bytecodeIndex())).stubInfo, callExitSiteData, codeOrigin);

if (didExit)
return result.slowVersion();
Expand All @@ -166,7 +166,7 @@ GetByStatus GetByStatus::computeFor(CodeBlock* profiledBlock, ICStatusMap& map,
#endif

if (!result)
return computeFromLLInt(profiledBlock, bytecodeIndex);
return computeFromLLInt(profiledBlock, codeOrigin.bytecodeIndex());

return result;
}
Expand All @@ -184,6 +184,9 @@ GetByStatus::GetByStatus(StubInfoSummary summary, StructureStubInfo* stubInfo)
RELEASE_ASSERT_NOT_REACHED();
return;
case StubInfoSummary::Megamorphic:
ASSERT(stubInfo);
m_state = stubInfo->tookSlowPath ? ObservedTakesSlowPath : Megamorphic;
return;
case StubInfoSummary::TakesSlowPath:
ASSERT(stubInfo);
m_state = stubInfo->tookSlowPath ? ObservedTakesSlowPath : LikelyTakesSlowPath;
Expand All @@ -209,8 +212,21 @@ GetByStatus::GetByStatus(const ProxyObjectAccessCase&)
{
}

GetByStatus GetByStatus::computeForStubInfoWithoutExitSiteFeedback(
const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, CallLinkStatus::ExitSiteData callExitSiteData)
static bool isSameStyledCodeOrigin(CodeOrigin lhs, CodeOrigin rhs)
{
while (true) {
if (lhs.bytecodeIndex() != rhs.bytecodeIndex())
return false;
if (!!lhs.inlineCallFrame() != !!rhs.inlineCallFrame())
return false;
if (!lhs.inlineCallFrame())
return true;
lhs = lhs.inlineCallFrame()->directCaller;
rhs = rhs.inlineCallFrame()->directCaller;
}
}

GetByStatus GetByStatus::computeForStubInfoWithoutExitSiteFeedback(const ConcurrentJSLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, CallLinkStatus::ExitSiteData callExitSiteData, CodeOrigin codeOrigin)
{
StubInfoSummary summary = StructureStubInfo::summary(profiledBlock->vm(), stubInfo);
if (!isInlineable(summary))
Expand Down Expand Up @@ -261,6 +277,19 @@ GetByStatus GetByStatus::computeForStubInfoWithoutExitSiteFeedback(
status.appendVariant(GetByVariant(accessCase.identifier(), { }, invalidOffset, { }, WTFMove(callLinkStatus)));
return status;
}
case AccessCase::LoadMegamorphic: {
// Emitting LoadMegamorphic means that we give up polymorphic IC optimization. So this needs very careful handling.
// It is possible that one function can be inlined from the other function, and then it gets limited # of structures.
// In this case, continue using IC is better than falling back to megamorphic case. But if the function gets compiled before,
// and even optimizing JIT saw the megamorphism, then this is likely that this function continues having megamorphic behavior,
// and inlined megamorphic code is faster. Currently, we use LoadMegamorphic only when the exact same form of CodeOrigin gets
// this megamorphic GetById before (same level of inlining etc.). This is very conservative but effective since IC is very fast
// when it worked well (but costly if it doesn't work and get megamorphic). Once this cost-benefit tradeoff gets changed (via
// handler IC), we can revisit this condition.
if (isSameStyledCodeOrigin(stubInfo->codeOrigin, codeOrigin) && !stubInfo->tookSlowPath)
return GetByStatus(Megamorphic, /* wasSeenInJIT */ true);
break;
}
default:
break;
}
Expand Down Expand Up @@ -392,9 +421,7 @@ GetByStatus GetByStatus::computeFor(
if (!context->isInlined(codeOrigin)) {
// Merge with baseline result, which also happens to contain exit data for both
// inlined and not-inlined.
GetByStatus baselineResult = computeFor(
profiledBlock, baselineMap, bytecodeIndex, didExit,
callExitSiteData);
GetByStatus baselineResult = computeFor(profiledBlock, baselineMap, didExit, callExitSiteData, codeOrigin);
baselineResult.merge(result);
return baselineResult;
}
Expand All @@ -407,8 +434,7 @@ GetByStatus GetByStatus::computeFor(
GetByStatus result;
{
ConcurrentJSLocker locker(context->optimizedCodeBlock->m_lock);
result = computeForStubInfoWithoutExitSiteFeedback(
locker, context->optimizedCodeBlock, status.stubInfo, callExitSiteData);
result = computeForStubInfoWithoutExitSiteFeedback(locker, context->optimizedCodeBlock, status.stubInfo, callExitSiteData, codeOrigin);
}
if (result.isSet())
return bless(result);
Expand All @@ -418,7 +444,7 @@ GetByStatus GetByStatus::computeFor(
return bless(*status.getStatus);
}

return computeFor(profiledBlock, baselineMap, bytecodeIndex, didExit, callExitSiteData);
return computeFor(profiledBlock, baselineMap, didExit, callExitSiteData, codeOrigin);
}

GetByStatus GetByStatus::computeFor(const StructureSet& set, UniquedStringImpl* uid)
Expand Down Expand Up @@ -483,6 +509,7 @@ bool GetByStatus::makesCalls() const
case ProxyObject:
case MakesCalls:
case ObservedSlowPathAndMakesCalls:
case Megamorphic:
return true;
}
RELEASE_ASSERT_NOT_REACHED();
Expand Down Expand Up @@ -513,6 +540,16 @@ void GetByStatus::merge(const GetByStatus& other)
case NoInformation:
*this = other;
return;

case Megamorphic:
if (m_state != other.m_state) {
if (other.m_state == Simple || other.m_state == Custom) {
*this = other;
return;
}
return mergeSlow();
}
return;

case Simple:
case Custom:
Expand Down Expand Up @@ -618,6 +655,9 @@ void GetByStatus::dump(PrintStream& out) const
case Custom:
out.print("Custom");
break;
case Megamorphic:
out.print("Megamorphic");
break;
case ModuleNamespace:
out.print("ModuleNamespace");
break;
Expand Down
15 changes: 10 additions & 5 deletions Source/JavaScriptCore/bytecode/GetByStatus.h
Expand Up @@ -58,6 +58,8 @@ class GetByStatus final {
Simple,
// It's cached for a custom accessor with a possible structure chain.
Custom,
// It's cached for a megamorphic case.
Megamorphic,
// It's cached for an access to a module namespace object's binding.
ModuleNamespace,
// It's cached for an access to a proxy object's binding.
Expand All @@ -69,7 +71,7 @@ class GetByStatus final {
// It will likely take the slow path and will make calls.
MakesCalls,
// It known to take paths that make calls. We also observed that the slow path was taken on StructureStubInfo.
ObservedSlowPathAndMakesCalls ,
ObservedSlowPathAndMakesCalls,
};

GetByStatus()
Expand Down Expand Up @@ -101,6 +103,7 @@ class GetByStatus final {
explicit operator bool() const { return isSet(); }
bool isSimple() const { return m_state == Simple; }
bool isCustom() const { return m_state == Custom; }
bool isMegamorphic() const { return m_state == Megamorphic; }
bool isModuleNamespace() const { return m_state == ModuleNamespace; }
bool isProxyObject() const { return m_state == ProxyObject; }

Expand All @@ -109,7 +112,10 @@ class GetByStatus final {
const GetByVariant& at(size_t index) const { return m_variants[index]; }
const GetByVariant& operator[](size_t index) const { return at(index); }

bool takesSlowPath() const { return m_state == LikelyTakesSlowPath || m_state == ObservedTakesSlowPath || m_state == MakesCalls || m_state == ObservedSlowPathAndMakesCalls || m_state == Custom || m_state == ModuleNamespace; }
bool takesSlowPath() const
{
return m_state == LikelyTakesSlowPath || m_state == ObservedTakesSlowPath || m_state == MakesCalls || m_state == ObservedSlowPathAndMakesCalls || m_state == Custom || m_state == ModuleNamespace || m_state == Megamorphic;
}
bool observedStructureStubInfoSlowPath() const { return m_state == ObservedTakesSlowPath || m_state == ObservedSlowPathAndMakesCalls; }
bool makesCalls() const;

Expand Down Expand Up @@ -141,11 +147,10 @@ class GetByStatus final {
#if ENABLE(JIT)
GetByStatus(const ModuleNamespaceAccessCase&);
GetByStatus(const ProxyObjectAccessCase&);
static GetByStatus computeForStubInfoWithoutExitSiteFeedback(
const ConcurrentJSLocker&, CodeBlock* profiledBlock, StructureStubInfo*, CallLinkStatus::ExitSiteData);
static GetByStatus computeForStubInfoWithoutExitSiteFeedback(const ConcurrentJSLocker&, CodeBlock* profiledBlock, StructureStubInfo*, CallLinkStatus::ExitSiteData, CodeOrigin);
#endif
static GetByStatus computeFromLLInt(CodeBlock*, BytecodeIndex);
static GetByStatus computeFor(CodeBlock*, ICStatusMap&, BytecodeIndex, ExitFlag, CallLinkStatus::ExitSiteData);
static GetByStatus computeFor(CodeBlock*, ICStatusMap&, ExitFlag, CallLinkStatus::ExitSiteData, CodeOrigin);

struct ModuleNamespaceData {
JSModuleNamespaceObject* m_moduleNamespaceObject { nullptr };
Expand Down
61 changes: 4 additions & 57 deletions Source/JavaScriptCore/bytecode/InlineCacheCompiler.cpp
Expand Up @@ -1264,7 +1264,7 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers
ASSERT(!accessCase.viaGlobalProxy());
CCallHelpers::JumpList primaryFail;
CCallHelpers::JumpList failAndRepatch;
unsigned hash = accessCase.m_identifier.uid()->hash();
auto* uid = accessCase.m_identifier.uid();

auto allocator = makeDefaultScratchAllocator(scratchGPR);
GPRReg scratch2GPR = allocator.allocateScratchGPR();
Expand All @@ -1273,70 +1273,17 @@ void InlineCacheCompiler::generateWithGuard(AccessCase& accessCase, CCallHelpers

ScratchRegisterAllocator::PreservedState preservedState = allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);

jit.load32(CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()), scratchGPR);
auto slowCases = jit.loadMegamorphicProperty(vm, baseGPR, uid, valueRegs.payloadGPR(), scratchGPR, scratch2GPR, scratch3GPR, scratch4GPR);

// Primary cache lookup
jit.urshift32(scratchGPR, CCallHelpers::TrustedImm32(MegamorphicCache::structureIDHashShift1), scratch2GPR);
jit.urshift32(scratchGPR, CCallHelpers::TrustedImm32(MegamorphicCache::structureIDHashShift2), scratch3GPR);
jit.xor32(scratch2GPR, scratch3GPR);
jit.add32(CCallHelpers::TrustedImm32(hash), scratch3GPR);
jit.and32(CCallHelpers::TrustedImm32(MegamorphicCache::primaryMask), scratch3GPR);
if constexpr (hasOneBitSet(sizeof(MegamorphicCache::Entry))) // is a power of 2
jit.lshift32(CCallHelpers::TrustedImm32(getLSBSet(sizeof(MegamorphicCache::Entry))), scratch3GPR);
else
jit.mul32(CCallHelpers::TrustedImm32(sizeof(MegamorphicCache::Entry)), scratch3GPR, scratch3GPR);
auto& cache = vm.ensureMegamorphicCache();
jit.move(CCallHelpers::TrustedImmPtr(&cache), scratch2GPR);
ASSERT(!MegamorphicCache::offsetOfPrimaryEntries());
jit.addPtr(scratch2GPR, scratch3GPR);

jit.load16(CCallHelpers::Address(scratch2GPR, MegamorphicCache::offsetOfEpoch()), scratch4GPR);

jit.load16(CCallHelpers::Address(scratch3GPR, MegamorphicCache::Entry::offsetOfEpoch()), scratch2GPR);
primaryFail.append(jit.branch32(CCallHelpers::NotEqual, scratch4GPR, scratch2GPR));
primaryFail.append(jit.branch32(CCallHelpers::NotEqual, scratchGPR, CCallHelpers::Address(scratch3GPR, MegamorphicCache::Entry::offsetOfStructureID())));
primaryFail.append(jit.branchPtr(CCallHelpers::NotEqual, CCallHelpers::Address(scratch3GPR, MegamorphicCache::Entry::offsetOfUid()), CCallHelpers::TrustedImmPtr(accessCase.m_identifier.uid())));

// Cache hit!
CCallHelpers::Label cacheHit = jit.label();
jit.loadPtr(CCallHelpers::Address(scratch3GPR, MegamorphicCache::Entry::offsetOfHolder()), scratch2GPR);
auto missed = jit.branchTestPtr(CCallHelpers::Zero, scratch2GPR);
jit.moveConditionally64(CCallHelpers::Equal, scratch2GPR, CCallHelpers::TrustedImm32(bitwise_cast<uintptr_t>(JSCell::seenMultipleCalleeObjects())), baseGPR, scratch2GPR, scratchGPR);
jit.load16(CCallHelpers::Address(scratch3GPR, MegamorphicCache::Entry::offsetOfOffset()), scratch2GPR);
jit.loadProperty(scratchGPR, scratch2GPR, valueRegs);
auto done = jit.jump();

missed.link(&jit);
jit.moveTrustedValue(jsUndefined(), valueRegs);

done.link(&jit);
allocator.restoreReusedRegistersByPopping(jit, preservedState);
succeed();

// Secondary cache lookup
primaryFail.link(&jit);
jit.add32(CCallHelpers::TrustedImm32(static_cast<uint32_t>(bitwise_cast<uintptr_t>(accessCase.m_identifier.uid()))), scratchGPR, scratch2GPR);
jit.urshift32(scratch2GPR, CCallHelpers::TrustedImm32(MegamorphicCache::structureIDHashShift3), scratch3GPR);
jit.add32(scratch2GPR, scratch3GPR);
jit.and32(CCallHelpers::TrustedImm32(MegamorphicCache::secondaryMask), scratch3GPR);
if constexpr (hasOneBitSet(sizeof(MegamorphicCache::Entry))) // is a power of 2
jit.lshift32(CCallHelpers::TrustedImm32(getLSBSet(sizeof(MegamorphicCache::Entry))), scratch3GPR);
else
jit.mul32(CCallHelpers::TrustedImm32(sizeof(MegamorphicCache::Entry)), scratch3GPR, scratch3GPR);
jit.addPtr(CCallHelpers::TrustedImmPtr(bitwise_cast<uint8_t*>(&cache) + MegamorphicCache::offsetOfSecondaryEntries()), scratch3GPR);

jit.load16(CCallHelpers::Address(scratch3GPR, MegamorphicCache::Entry::offsetOfEpoch()), scratch2GPR);
failAndRepatch.append(jit.branch32(CCallHelpers::NotEqual, scratch4GPR, scratch2GPR));
failAndRepatch.append(jit.branch32(CCallHelpers::NotEqual, scratchGPR, CCallHelpers::Address(scratch3GPR, MegamorphicCache::Entry::offsetOfStructureID())));
failAndRepatch.append(jit.branchPtr(CCallHelpers::NotEqual, CCallHelpers::Address(scratch3GPR, MegamorphicCache::Entry::offsetOfUid()), CCallHelpers::TrustedImmPtr(accessCase.m_identifier.uid())));
jit.jump().linkTo(cacheHit, &jit);

if (allocator.didReuseRegisters()) {
failAndRepatch.link(&jit);
slowCases.link(&jit);
allocator.restoreReusedRegistersByPopping(jit, preservedState);
m_failAndRepatch.append(jit.jump());
} else
m_failAndRepatch.append(failAndRepatch);
m_failAndRepatch.append(slowCases);
#endif
return;
}
Expand Down
3 changes: 2 additions & 1 deletion Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h
Expand Up @@ -3644,7 +3644,8 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
case GetByIdDirect:
case GetByIdDirectFlush:
case GetById:
case GetByIdFlush: {
case GetByIdFlush:
case GetByIdMegamorphic: {
AbstractValue& value = forNode(node->child1());

if (Options::useAccessInlining()
Expand Down
8 changes: 8 additions & 0 deletions Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
Expand Up @@ -5021,6 +5021,14 @@ void ByteCodeParser::handleGetById(
return;
}
}
#if USE(JSVALUE64)
if (type == AccessType::GetById) {
if (getByStatus.isMegamorphic()) {
set(destination, addToGraph(GetByIdMegamorphic, OpInfo(identifier), OpInfo(prediction), base));
return;
}
}
#endif
}

// Special path for custom accessors since custom's offset does not have any meanings.
Expand Down
1 change: 1 addition & 0 deletions Source/JavaScriptCore/dfg/DFGClobberize.h
Expand Up @@ -702,6 +702,7 @@ void clobberize(Graph& graph, Node* node, const ReadFunctor& read, const WriteFu

case GetById:
case GetByIdFlush:
case GetByIdMegamorphic:
case GetByIdWithThis:
case GetByIdDirect:
case GetByIdDirectFlush:
Expand Down
1 change: 1 addition & 0 deletions Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
Expand Up @@ -600,6 +600,7 @@ class ConstantFoldingPhase : public Phase {
case GetByIdDirectFlush:
case GetById:
case GetByIdFlush:
case GetByIdMegamorphic:
case GetPrivateNameById: {
Edge childEdge = node->child1();
Node* child = childEdge.node();
Expand Down
1 change: 1 addition & 0 deletions Source/JavaScriptCore/dfg/DFGDoesGC.cpp
Expand Up @@ -297,6 +297,7 @@ bool doesGC(Graph& graph, Node* node)
case GetByIdDirect:
case GetByIdDirectFlush:
case GetByIdFlush:
case GetByIdMegamorphic:
case GetByIdWithThis:
case GetByValWithThis:
case GetDynamicVar:
Expand Down
6 changes: 5 additions & 1 deletion Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
Expand Up @@ -1954,7 +1954,11 @@ class FixupPhase : public Phase {
fixEdge<CellUse>(node->child1());
break;
}


case GetByIdMegamorphic:
fixEdge<CellUse>(node->child1());
break;

case GetByIdWithThis: {
if (node->child1()->shouldSpeculateCell() && node->child2()->shouldSpeculateCell()) {
fixEdge<CellUse>(node->child1());
Expand Down
6 changes: 4 additions & 2 deletions Source/JavaScriptCore/dfg/DFGNode.h
Expand Up @@ -617,7 +617,7 @@ struct Node {

void convertToGetByOffset(StorageAccessData& data, Edge storage, Edge base)
{
ASSERT(m_op == GetById || m_op == GetByIdFlush || m_op == GetByIdDirect || m_op == GetByIdDirectFlush || m_op == GetPrivateNameById || m_op == MultiGetByOffset);
ASSERT(m_op == GetById || m_op == GetByIdFlush || m_op == GetByIdDirect || m_op == GetByIdDirectFlush || m_op == GetPrivateNameById || m_op == MultiGetByOffset || m_op == GetByIdMegamorphic);
m_opInfo = &data;
children.setChild1(storage);
children.setChild2(base);
Expand All @@ -627,7 +627,7 @@ struct Node {

void convertToMultiGetByOffset(MultiGetByOffsetData* data)
{
RELEASE_ASSERT(m_op == GetById || m_op == GetByIdFlush || m_op == GetByIdDirect || m_op == GetByIdDirectFlush || m_op == GetPrivateNameById);
RELEASE_ASSERT(m_op == GetById || m_op == GetByIdFlush || m_op == GetByIdDirect || m_op == GetByIdDirectFlush || m_op == GetPrivateNameById || m_op == GetByIdMegamorphic);
m_opInfo = data;
child1().setUseKind(CellUse);
m_op = MultiGetByOffset;
Expand Down Expand Up @@ -1105,6 +1105,7 @@ struct Node {
case TryGetById:
case GetById:
case GetByIdFlush:
case GetByIdMegamorphic:
case GetByIdWithThis:
case GetByIdDirect:
case GetByIdDirectFlush:
Expand Down Expand Up @@ -1847,6 +1848,7 @@ struct Node {
case ArithTrunc:
case GetById:
case GetByIdFlush:
case GetByIdMegamorphic:
case GetByIdWithThis:
case GetByIdDirect:
case GetByIdDirectFlush:
Expand Down
1 change: 1 addition & 0 deletions Source/JavaScriptCore/dfg/DFGNodeType.h
Expand Up @@ -221,6 +221,7 @@ namespace JSC { namespace DFG {
macro(GetByIdWithThis, NodeResultJS | NodeMustGenerate) \
macro(GetByIdDirect, NodeResultJS | NodeMustGenerate) \
macro(GetByIdDirectFlush, NodeResultJS | NodeMustGenerate) \
macro(GetByIdMegamorphic, NodeResultJS | NodeMustGenerate) \
macro(PutById, NodeMustGenerate) \
macro(PutByIdFlush, NodeMustGenerate) \
macro(PutByIdDirect, NodeMustGenerate) \
Expand Down

0 comments on commit 98d5e19

Please sign in to comment.