Skip to content
Permalink
Browse files
[JSC] DFG should be able to compile-and-inline op_call_eval
https://bugs.webkit.org/show_bug.cgi?id=245043

Reviewed by Alexey Shvayka.

We found that the DFG capability check consumes some time in Speedometer2.1. This is wasteful
since op_call_eval is the only bytecode which cannot be included in an inlined function in DFG.
We originally had many bytecodes which could not be compiled or inlined in DFG, and we continuously
removed them; this op_call_eval is the last one.

In this patch, we make op_call_eval available in inlined DFG functions, and we remove the DFG capability
level check function, since any new bytecode must be supported in DFG from the beginning. op_call_eval
was not inlinable before since it relied on thisValue, the caller CodeBlock, and the scope value in the stack.
But this was achieved by a hack. We should simplify it and support op_call_eval even in inlined functions.

In this patch,

1. op_call_eval should carry thisValue and scope VirtualRegisters in the bytecode, and it should get them through
   those operands instead of getting them from the CodeBlock implicitly. This also simplifies the UseDef definition for op_call_eval.
2. DFG should get thisValue / scope in the normal way in CallEval instead of getting them from the stack. This removes
   a lot of Flush-this / scope hacks in DFG. Since we no longer rely on the stack's values via DFG's top-level CodeBlock,
   we can inline functions containing op_call_eval.
3. Because of (2), the DFG capability level check can always say CanCompileAndInline. We remove these costly checks.
4. We always use the baseline CodeBlock's DirectEvalCodeCache now, so we can hit the eval code cache more frequently.

* Source/JavaScriptCore/bytecode/BytecodeList.rb:
* Source/JavaScriptCore/bytecode/BytecodeUseDef.cpp:
(JSC::computeUsesForBytecodeIndexImpl):
* Source/JavaScriptCore/bytecode/BytecodeUseDef.h:
(JSC::computeUsesForBytecodeIndex):
* Source/JavaScriptCore/bytecode/DirectEvalCodeCache.cpp:
(JSC::DirectEvalCodeCache::setSlow):
* Source/JavaScriptCore/bytecode/DirectEvalCodeCache.h:
(JSC::DirectEvalCodeCache::CacheKey::CacheKey):
(JSC::DirectEvalCodeCache::CacheKey::hash const):
(JSC::DirectEvalCodeCache::CacheKey::operator== const):
(JSC::DirectEvalCodeCache::tryGet):
(JSC::DirectEvalCodeCache::set):
* Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp:
(JSC::BytecodeGenerator::emitCall):
* Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp:
(JSC::DFG::ByteCodeParser::setArgument):
(JSC::DFG::ByteCodeParser::addCallWithoutSettingResult):
(JSC::DFG::ByteCodeParser::addCall):
(JSC::DFG::ByteCodeParser::parseBlock):
* Source/JavaScriptCore/dfg/DFGCapabilities.cpp:
(JSC::DFG::capabilityLevel): Deleted.
* Source/JavaScriptCore/dfg/DFGCapabilities.h:
(JSC::DFG::canUseOSRExitFuzzing):
(JSC::DFG::evalCapabilityLevel):
(JSC::DFG::programCapabilityLevel):
(JSC::DFG::functionForCallCapabilityLevel):
(JSC::DFG::functionForConstructCapabilityLevel):
(JSC::DFG::inlineFunctionForCallCapabilityLevel):
(JSC::DFG::inlineFunctionForClosureCallCapabilityLevel):
(JSC::DFG::inlineFunctionForConstructCapabilityLevel):
(JSC::DFG::capabilityLevel): Deleted.
* Source/JavaScriptCore/dfg/DFGClobberize.h:
(JSC::DFG::clobberize):
* Source/JavaScriptCore/dfg/DFGGraph.h:
* Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp:
(JSC::DFG::SpeculativeJIT::emitCall):
* Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::emitCall):
* Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp:
(JSC::FTL::DFG::LowerDFGToB3::compileCompareStrictEq):
* Source/JavaScriptCore/interpreter/Interpreter.cpp:
(JSC::eval):
* Source/JavaScriptCore/interpreter/Interpreter.h:
* Source/JavaScriptCore/jit/BaselineJITRegisters.h:
* Source/JavaScriptCore/jit/JITCall.cpp:
(JSC::JIT::compileCallEval):
* Source/JavaScriptCore/jit/JITOperations.cpp:
(JSC::JSC_DEFINE_JIT_OPERATION):
* Source/JavaScriptCore/jit/JITOperations.h:
* Source/JavaScriptCore/llint/LLIntSlowPaths.cpp:
(JSC::LLInt::commonCallEval):

Canonical link: https://commits.webkit.org/254367@main
  • Loading branch information
Constellation committed Sep 12, 2022
1 parent d4d2f95 commit fd6ff6b2f4cb99a6799715307fdde1b7fe6e98cc
Show file tree
Hide file tree
Showing 50 changed files with 259 additions and 509 deletions.
@@ -862,12 +862,14 @@
profile: ValueProfile,
}

op :call_eval,
op :call_direct_eval,
args: {
dst: VirtualRegister,
callee: VirtualRegister,
argc: unsigned,
argv: unsigned,
thisValue: VirtualRegister,
scope: VirtualRegister,
ecmaMode: ECMAMode,
},
metadata: {
@@ -1464,7 +1466,7 @@
op :op_tail_call_slow_return_location
op :op_tail_call_forward_arguments_slow_return_location
op :op_tail_call_varargs_slow_return_location
op :op_call_eval_slow_return_location
op :op_call_direct_eval_slow_return_location

op :js_trampoline_op_call
op :js_trampoline_op_construct
@@ -1479,7 +1481,7 @@
op :js_trampoline_op_tail_call_varargs_slow
op :js_trampoline_op_tail_call_forward_arguments_slow
op :js_trampoline_op_construct_varargs_slow
op :js_trampoline_op_call_eval_slow
op :js_trampoline_op_call_direct_eval_slow
op :js_trampoline_op_iterator_next_slow
op :js_trampoline_op_iterator_open_slow
op :js_trampoline_llint_function_for_call_arity_check_untag
@@ -42,7 +42,7 @@ namespace JSC {
#define USES USES_OR_DEFS
#define DEFS USES_OR_DEFS

void computeUsesForBytecodeIndexImpl(VirtualRegister scopeRegister, const JSInstruction* instruction, Checkpoint checkpoint, const ScopedLambda<void(VirtualRegister)>& functor)
void computeUsesForBytecodeIndexImpl(const JSInstruction* instruction, Checkpoint checkpoint, const ScopedLambda<void(VirtualRegister)>& functor)
{
OpcodeID opcodeID = instruction->opcodeID();

@@ -58,8 +58,6 @@ void computeUsesForBytecodeIndexImpl(VirtualRegister scopeRegister, const JSInst
int lastArg = -static_cast<int>(op.m_argv) + CallFrame::thisArgumentOffset();
for (int i = 0; i < static_cast<int>(op.m_argc); i++)
functor(VirtualRegister { lastArg + i });
if (opcodeID == op_call_eval)
functor(scopeRegister);
return;
};

@@ -323,9 +321,13 @@ void computeUsesForBytecodeIndexImpl(VirtualRegister scopeRegister, const JSInst
case op_construct:
handleOpCallLike(instruction->as<OpConstruct>());
return;
case op_call_eval:
handleOpCallLike(instruction->as<OpCallEval>());
case op_call_direct_eval: {
auto bytecode = instruction->as<OpCallDirectEval>();
handleOpCallLike(bytecode);
functor(bytecode.m_thisValue);
functor(bytecode.m_scope);
return;
}
case op_call:
handleOpCallLike(instruction->as<OpCall>());
return;
@@ -469,7 +471,7 @@ void computeDefsForBytecodeIndexImpl(unsigned numVars, const JSInstruction* inst
DEFS(OpGetFromScope, dst)
DEFS(OpCall, dst)
DEFS(OpTailCall, dst)
DEFS(OpCallEval, dst)
DEFS(OpCallDirectEval, dst)
DEFS(OpConstruct, dst)
DEFS(OpTryGetById, dst)
DEFS(OpGetById, dst)
@@ -32,17 +32,17 @@

namespace JSC {

void computeUsesForBytecodeIndexImpl(VirtualRegister, const JSInstruction*, Checkpoint, const ScopedLambda<void(VirtualRegister)>&);
void computeUsesForBytecodeIndexImpl(const JSInstruction*, Checkpoint, const ScopedLambda<void(VirtualRegister)>&);
void computeDefsForBytecodeIndexImpl(unsigned, const JSInstruction*, Checkpoint, const ScopedLambda<void(VirtualRegister)>&);

template<typename Block, typename Functor>
void computeUsesForBytecodeIndex(Block* codeBlock, const JSInstruction* instruction, Checkpoint checkpoint, const Functor& functor)
{
OpcodeID opcodeID = instruction->opcodeID();
if (opcodeID != op_enter && (codeBlock->wasCompiledWithDebuggingOpcodes() || codeBlock->usesCallEval()) && codeBlock->scopeRegister().isValid())
if (opcodeID != op_enter && codeBlock->wasCompiledWithDebuggingOpcodes() && codeBlock->scopeRegister().isValid())
functor(codeBlock->scopeRegister());

computeUsesForBytecodeIndexImpl(codeBlock->scopeRegister(), instruction, checkpoint, scopedLambda<void(VirtualRegister)>(functor));
computeUsesForBytecodeIndexImpl(instruction, checkpoint, scopedLambda<void(VirtualRegister)>(functor));
}

template<typename Block, typename Functor>
@@ -47,7 +47,7 @@ CallLinkInfo::CallType CallLinkInfo::callTypeFor(OpcodeID opcodeID)
return TailCallVarargs;

case op_call:
case op_call_eval:
case op_call_direct_eval:
case op_iterator_open:
case op_iterator_next:
return Call;
@@ -530,7 +530,7 @@ bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, Unlink

LINK(OpCall, callLinkInfo, profile)
LINK(OpTailCall, callLinkInfo, profile)
LINK(OpCallEval, callLinkInfo, profile)
LINK(OpCallDirectEval, callLinkInfo, profile)
LINK(OpConstruct, callLinkInfo, profile)
LINK(OpIteratorOpen, callLinkInfo)
LINK(OpIteratorNext, callLinkInfo)
@@ -367,8 +367,6 @@ class CodeBlock : public JSCell {

VirtualRegister thisRegister() const { return m_unlinkedCode->thisRegister(); }

bool usesCallEval() const { return m_unlinkedCode->usesCallEval(); }

void setScopeRegister(VirtualRegister scopeRegister)
{
ASSERT(scopeRegister.isLocal() || !scopeRegister.isValid());
@@ -30,13 +30,13 @@

namespace JSC {

void DirectEvalCodeCache::setSlow(JSGlobalObject* globalObject, JSCell* owner, const String& evalSource, CallSiteIndex callSiteIndex, DirectEvalExecutable* evalExecutable)
void DirectEvalCodeCache::setSlow(JSGlobalObject* globalObject, JSCell* owner, const String& evalSource, BytecodeIndex bytecodeIndex, DirectEvalExecutable* evalExecutable)
{
if (!evalExecutable->allowDirectEvalCache())
return;

Locker locker { m_lock };
m_cacheMap.set(CacheKey(evalSource, callSiteIndex), WriteBarrier<DirectEvalExecutable>(globalObject->vm(), owner, evalExecutable));
m_cacheMap.set(CacheKey(evalSource, bytecodeIndex), WriteBarrier<DirectEvalExecutable>(globalObject->vm(), owner, evalExecutable));
}

void DirectEvalCodeCache::clear()
@@ -41,9 +41,9 @@ namespace JSC {
public:
class CacheKey {
public:
CacheKey(const String& source, CallSiteIndex callSiteIndex)
CacheKey(const String& source, BytecodeIndex bytecodeIndex)
: m_source(source.impl())
, m_callSiteIndex(callSiteIndex)
, m_bytecodeIndex(bytecodeIndex)
{
}

@@ -54,13 +54,13 @@ namespace JSC {

CacheKey() = default;

unsigned hash() const { return m_source->hash() ^ m_callSiteIndex.bits(); }
unsigned hash() const { return m_source->hash() ^ m_bytecodeIndex.asBits(); }

bool isEmptyValue() const { return !m_source; }

bool operator==(const CacheKey& other) const
{
return m_callSiteIndex == other.m_callSiteIndex && WTF::equal(m_source.get(), other.m_source.get());
return m_bytecodeIndex == other.m_bytecodeIndex && WTF::equal(m_source.get(), other.m_source.get());
}

bool isHashTableDeletedValue() const { return m_source.isHashTableDeletedValue(); }
@@ -81,18 +81,18 @@ namespace JSC {

private:
RefPtr<StringImpl> m_source;
CallSiteIndex m_callSiteIndex;
BytecodeIndex m_bytecodeIndex;
};

DirectEvalExecutable* tryGet(const String& evalSource, CallSiteIndex callSiteIndex)
DirectEvalExecutable* tryGet(const String& evalSource, BytecodeIndex bytecodeIndex)
{
return m_cacheMap.inlineGet(CacheKey(evalSource, callSiteIndex)).get();
return m_cacheMap.inlineGet(CacheKey(evalSource, bytecodeIndex)).get();
}

void set(JSGlobalObject* globalObject, JSCell* owner, const String& evalSource, CallSiteIndex callSiteIndex, DirectEvalExecutable* evalExecutable)
void set(JSGlobalObject* globalObject, JSCell* owner, const String& evalSource, BytecodeIndex bytecodeIndex, DirectEvalExecutable* evalExecutable)
{
if (m_cacheMap.size() < maxCacheEntries)
setSlow(globalObject, owner, evalSource, callSiteIndex, evalExecutable);
setSlow(globalObject, owner, evalSource, bytecodeIndex, evalExecutable);
}

bool isEmpty() const { return m_cacheMap.isEmpty(); }
@@ -104,7 +104,7 @@ namespace JSC {
private:
static constexpr int maxCacheEntries = 64;

void setSlow(JSGlobalObject*, JSCell* owner, const String& evalSource, CallSiteIndex, DirectEvalExecutable*);
void setSlow(JSGlobalObject*, JSCell* owner, const String& evalSource, BytecodeIndex, DirectEvalExecutable*);

typedef HashMap<CacheKey, WriteBarrier<DirectEvalExecutable>, CacheKey::Hash, CacheKey::HashTraits> EvalCacheMap;
EvalCacheMap m_cacheMap;
@@ -120,7 +120,7 @@ static constexpr unsigned bitWidthForMaxBytecodeStructLength = WTF::getMSBSetCon
macro(OpToThis) \
macro(OpCall) \
macro(OpTailCall) \
macro(OpCallEval) \
macro(OpCallDirectEval) \
macro(OpConstruct) \
macro(OpGetFromScope) \
macro(OpBitand) \
@@ -134,7 +134,7 @@ static constexpr unsigned bitWidthForMaxBytecodeStructLength = WTF::getMSBSetCon
#define FOR_EACH_OPCODE_WITH_CALL_LINK_INFO(macro) \
macro(OpCall) \
macro(OpTailCall) \
macro(OpCallEval) \
macro(OpCallDirectEval) \
macro(OpConstruct) \
macro(OpIteratorOpen) \
macro(OpIteratorNext) \
@@ -44,7 +44,7 @@ inline bool isOpcodeShape(OpcodeID opcodeID)
if (shape == OpCallShape) {
return opcodeID == op_call
|| opcodeID == op_tail_call
|| opcodeID == op_call_eval
|| opcodeID == op_call_direct_eval
|| opcodeID == op_call_varargs
|| opcodeID == op_tail_call_varargs
|| opcodeID == op_tail_call_forward_arguments;
@@ -44,7 +44,6 @@ const ClassInfo UnlinkedCodeBlock::s_info = { "UnlinkedCodeBlock"_s, nullptr, nu
UnlinkedCodeBlock::UnlinkedCodeBlock(VM& vm, Structure* structure, CodeType codeType, const ExecutableInfo& info, OptionSet<CodeGenerationMode> codeGenerationMode)
: Base(vm, structure)
, m_numVars(0)
, m_usesCallEval(false)
, m_numCalleeLocals(0)
, m_isConstructor(info.isConstructor())
, m_numParameters(0)
@@ -143,8 +143,6 @@ class UnlinkedCodeBlock : public JSCell {
void initializeLoopHintExecutionCounter();

bool isConstructor() const { return m_isConstructor; }
bool usesCallEval() const { return m_usesCallEval; }
void setUsesCallEval() { m_usesCallEval = true; }
SourceParseMode parseMode() const { return m_parseMode; }
bool isArrowFunction() const { return isArrowFunctionParseMode(parseMode()); }
DerivedContextType derivedContextType() const { return static_cast<DerivedContextType>(m_derivedContextType); }
@@ -402,7 +400,6 @@ class UnlinkedCodeBlock : public JSCell {
VirtualRegister m_scopeRegister;

unsigned m_numVars : 31;
unsigned m_usesCallEval : 1;
unsigned m_numCalleeLocals : 31;
unsigned m_isConstructor : 1;
unsigned m_numParameters : 31;
@@ -51,8 +51,6 @@ class UnlinkedCodeBlockGenerator {
JSParserScriptMode scriptMode() const { return m_codeBlock->scriptMode(); }
NeedsClassFieldInitializer needsClassFieldInitializer() const { return m_codeBlock->needsClassFieldInitializer(); }
PrivateBrandRequirement privateBrandRequirement() const { return m_codeBlock->privateBrandRequirement(); }
bool usesCallEval() const { return m_codeBlock->usesCallEval(); }
void setUsesCallEval() { return m_codeBlock->setUsesCallEval(); }
SourceParseMode parseMode() const { return m_codeBlock->parseMode(); }
bool isArrowFunction() { return m_codeBlock->isArrowFunction(); }
DerivedContextType derivedContextType() const { return m_codeBlock->derivedContextType(); }
@@ -3408,9 +3408,9 @@ RegisterID* BytecodeGenerator::emitCallInTailPosition(RegisterID* dst, RegisterI
return emitCall<OpCall>(dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall);
}

RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall)
RegisterID* BytecodeGenerator::emitCallDirectEval(RegisterID* dst, RegisterID* func, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall)
{
return emitCall<OpCallEval>(dst, func, NoExpectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall);
return emitCall<OpCallDirectEval>(dst, func, NoExpectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall);
}

ExpectedFunction BytecodeGenerator::expectedFunctionForIdentifier(const Identifier& identifier)
@@ -3475,7 +3475,7 @@ template<typename CallOp>
RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall)
{
constexpr auto opcodeID = CallOp::opcodeID;
ASSERT(opcodeID == op_call || opcodeID == op_call_eval || opcodeID == op_tail_call);
ASSERT(opcodeID == op_call || opcodeID == op_call_direct_eval || opcodeID == op_tail_call);
ASSERT(func->refCount());

// Generate code for arguments.
@@ -3523,10 +3523,9 @@ RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, Expec
// Emit call.
ASSERT(dst);
ASSERT(dst != ignoredResult());
if constexpr (opcodeID == op_call_eval) {
m_codeBlock->setUsesCallEval();
CallOp::emit(this, dst, func, callArguments.argumentCountIncludingThis(), callArguments.stackOffset(), ecmaMode());
} else
if constexpr (opcodeID == op_call_direct_eval)
CallOp::emit(this, dst, func, callArguments.argumentCountIncludingThis(), callArguments.stackOffset(), thisRegister(), scopeRegister(), ecmaMode());
else
CallOp::emit(this, dst, func, callArguments.argumentCountIncludingThis(), callArguments.stackOffset());

if (expectedFunction != NoExpectedFunction)
@@ -816,7 +816,7 @@ namespace JSC {
ExpectedFunction expectedFunctionForIdentifier(const Identifier&);
RegisterID* emitCall(RegisterID* dst, RegisterID* func, ExpectedFunction, CallArguments&, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall);
RegisterID* emitCallInTailPosition(RegisterID* dst, RegisterID* func, ExpectedFunction, CallArguments&, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall);
RegisterID* emitCallEval(RegisterID* dst, RegisterID* func, CallArguments&, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall);
RegisterID* emitCallDirectEval(RegisterID* dst, RegisterID* func, CallArguments&, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall);
RegisterID* emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall);
RegisterID* emitCallVarargsInTailPosition(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall);
RegisterID* emitCallForwardArgumentsInTailPosition(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall);
@@ -1204,7 +1204,7 @@ RegisterID* EvalFunctionCallNode::emitBytecode(BytecodeGenerator& generator, Reg
if (isOptionalChainBase())
generator.emitOptionalCheck(func.get());

return generator.emitCallEval(returnValue.get(), func.get(), callArguments, divot(), divotStart(), divotEnd(), DebuggableCall::No);
return generator.emitCallDirectEval(returnValue.get(), func.get(), callArguments, divot(), divotStart(), divotEnd(), DebuggableCall::No);
}

// ------------------------------ FunctionCallValueNode ----------------------------------
@@ -4492,7 +4492,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
case ConstructVarargs:
case ConstructForwardVarargs:
case TailCallForwardVarargsInlinedCaller:
case CallEval:
case CallDirectEval:
case DirectCall:
case DirectConstruct:
case DirectTailCallInlinedCaller:
@@ -540,7 +540,7 @@ class ByteCodeParser {

// Always flush arguments, except for 'this'. If 'this' is created by us,
// then make sure that it's never unboxed.
if (argument || m_graph.needsFlushedThis()) {
if (argument) {
if (setMode != ImmediateNakedSet)
flushDirect(reg);
} else if (!argument) {
@@ -855,7 +855,7 @@ class ByteCodeParser {

Node* addCallWithoutSettingResult(
NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
OpInfo prediction)
OpInfo prediction, Node* thisValueForEval = nullptr, Node* scopeForEval = nullptr)
{
addVarArgChild(callee);
size_t parameterSlots = Graph::parameterSlotsForArgCount(argCount);
@@ -865,13 +865,17 @@ class ByteCodeParser {

for (int i = 0; i < argCount; ++i)
addVarArgChild(get(virtualRegisterForArgumentIncludingThis(i, registerOffset)));
if (op == CallDirectEval) {
addVarArgChild(Edge(thisValueForEval));
addVarArgChild(Edge(scopeForEval, KnownCellUse));
}

return addToGraph(Node::VarArg, op, opInfo, prediction);
}

Node* addCall(
Operand result, NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
SpeculatedType prediction)
SpeculatedType prediction, Node* thisValueForEval = nullptr, Node* scopeForEval = nullptr)
{
if (op == TailCall) {
if (allInlineFramesAreTailCalls())
@@ -880,8 +884,7 @@ class ByteCodeParser {
}


Node* call = addCallWithoutSettingResult(
op, opInfo, callee, argCount, registerOffset, OpInfo(prediction));
Node* call = addCallWithoutSettingResult(op, opInfo, callee, argCount, registerOffset, OpInfo(prediction), thisValueForEval, scopeForEval);
if (result.isValid())
set(result, call);
return call;
@@ -7248,11 +7251,11 @@ void ByteCodeParser::parseBlock(unsigned limit)
NEXT_OPCODE(op_construct_varargs);
}

case op_call_eval: {
auto bytecode = currentInstruction->as<OpCallEval>();
case op_call_direct_eval: {
auto bytecode = currentInstruction->as<OpCallDirectEval>();
int registerOffset = -bytecode.m_argv;
addCall(bytecode.m_dst, CallEval, OpInfo(bytecode.m_ecmaMode), get(bytecode.m_callee), bytecode.m_argc, registerOffset, getPrediction());
NEXT_OPCODE(op_call_eval);
addCall(bytecode.m_dst, CallDirectEval, OpInfo(bytecode.m_ecmaMode), get(bytecode.m_callee), bytecode.m_argc, registerOffset, getPrediction(), get(bytecode.m_thisValue), get(bytecode.m_scope));
NEXT_OPCODE(op_call_direct_eval);
}

case op_iterator_open: {

0 comments on commit fd6ff6b

Please sign in to comment.