8281711: Cherry-pick WebKit 613.1 stabilization fixes #772

Closed
wants to merge 1 commit into from
@@ -32,6 +32,7 @@
#include <JavaScriptCore/JSObjectRef.h>
#include <JavaScriptCore/JSStringRef.h>
#include <algorithm>
#include <utility>

inline void JSRetain(JSClassRef context) { JSClassRetain(context); }
inline void JSRelease(JSClassRef context) { JSClassRelease(context); }
@@ -176,6 +176,7 @@ static_assert((PROBE_EXECUTOR_OFFSET + PTR_SIZE) <= (PROBE_SIZE + OUT_SIZE), "Mu
#if CPU(X86)
#if COMPILER(GCC_COMPATIBLE)
asm (
".text" "\n"
".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
@@ -516,6 +517,7 @@ extern "C" __declspec(naked) void ctiMasmProbeTrampoline()
#if CPU(X86_64)
#if COMPILER(GCC_COMPATIBLE)
asm (
".text" "\n"
".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
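Both trampoline hunks make the same one-line change: the file-scope asm now starts with an explicit `.text` directive so the hand-written code is emitted into the text section rather than whatever section the assembler is currently in. Below is a minimal standalone sketch of that pattern, assuming Linux/x86-64 (System V ABI); the symbol name is hypothetical and the `SYMBOL_STRING`/`HIDE_SYMBOL` plumbing of the real trampoline is omitted.

```cpp
// Build with g++ or clang++ on Linux x86-64. The leading ".text" pins the
// hand-written routine to the text section, mirroring the directive the
// patch adds to both ctiMasmProbeTrampoline definitions.
#include <cstdio>

extern "C" long addOne(long); // hypothetical symbol, for illustration only

asm(
    ".text"                  "\n"
    ".globl addOne"          "\n"
    "addOne:"                "\n"
    "    leaq 1(%rdi), %rax" "\n"
    "    ret"                "\n"
);

int main()
{
    std::printf("%ld\n", addOne(41)); // prints 42
}
```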
@@ -123,16 +123,50 @@ Ref<AccessCase> AccessCase::create(VM& vm, JSCell* owner, AccessType type, Cache

RefPtr<AccessCase> AccessCase::createTransition(
VM& vm, JSCell* owner, CacheableIdentifier identifier, PropertyOffset offset, Structure* oldStructure, Structure* newStructure,
const ObjectPropertyConditionSet& conditionSet, RefPtr<PolyProtoAccessChain>&& prototypeAccessChain)
const ObjectPropertyConditionSet& conditionSet, RefPtr<PolyProtoAccessChain>&& prototypeAccessChain, const StructureStubInfo& stubInfo)
{
RELEASE_ASSERT(oldStructure == newStructure->previousID());

// Skip optimizing the case where we need a realloc, if we don't have
// enough registers to make it happen.
if (GPRInfo::numberOfRegisters < 6
&& oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()
&& oldStructure->outOfLineCapacity()) {
return nullptr;
if (oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity()) {
// In 64 bits, jsc uses 1 register for the value; in 32 bits it uses 2 registers
size_t requiredRegisters = 1; // stubInfo.valueRegs()
#if USE(JSVALUE32_64)
++requiredRegisters;
#endif

// 1 register for the property in 64 bits
++requiredRegisters;
#if USE(JSVALUE32_64)
// In 32 bits, jsc may use one extra register if the property is not a Cell
if (stubInfo.propertyRegs().tagGPR() != InvalidGPRReg)
++requiredRegisters;
#endif

// 1 register for the base in 64 bits
++requiredRegisters;
#if USE(JSVALUE32_64)
// In 32 bits, jsc may use one extra register if the base is not a Cell
if (stubInfo.baseRegs().tagGPR() != InvalidGPRReg)
++requiredRegisters;
#endif

if (stubInfo.m_stubInfoGPR != InvalidGPRReg)
++requiredRegisters;
if (stubInfo.m_arrayProfileGPR != InvalidGPRReg)
++requiredRegisters;

// One extra register for scratchGPR
++requiredRegisters;

// Check if we have enough registers when reallocating
if (oldStructure->outOfLineCapacity() && GPRInfo::numberOfRegisters < requiredRegisters)
return nullptr;

// If we are (re)allocating inline, jsc needs two extra scratchGPRs
if (!oldStructure->couldHaveIndexingHeader() && GPRInfo::numberOfRegisters < (requiredRegisters + 2))
return nullptr;
}

return adoptRef(*new AccessCase(vm, owner, Transition, identifier, offset, newStructure, conditionSet, WTFMove(prototypeAccessChain)));
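The old guard simply refused the transition whenever fewer than 6 GPRs were available and a reallocation was needed; the replacement derives the budget from the registers the stub actually holds. A minimal sketch of that arithmetic, using a hypothetical config struct in place of `StructureStubInfo` (illustrative names, not JSC API):

```cpp
#include <cstdio>

// Illustrative stand-in for the StructureStubInfo fields consulted above.
struct TransitionConfig {
    bool is32Bit;            // USE(JSVALUE32_64)
    bool propertyNeedsTag;   // 32-bit only: property tag GPR is in use
    bool baseNeedsTag;       // 32-bit only: base tag GPR is in use
    bool hasStubInfoGPR;
    bool hasArrayProfileGPR;
};

static unsigned requiredRegisters(const TransitionConfig& c)
{
    unsigned n = c.is32Bit ? 2 : 1;           // value: 1 GPR on 64-bit, 2 on 32-bit
    n += 1;                                   // property payload
    if (c.is32Bit && c.propertyNeedsTag) ++n; // property tag
    n += 1;                                   // base payload
    if (c.is32Bit && c.baseNeedsTag) ++n;     // base tag
    if (c.hasStubInfoGPR) ++n;
    if (c.hasArrayProfileGPR) ++n;
    n += 1;                                   // scratchGPR
    return n;
}

int main()
{
    TransitionConfig worst32 { true, true, true, true, true };
    TransitionConfig lean64 { false, false, false, false, false };
    std::printf("worst-case 32-bit: %u\n", requiredRegisters(worst32)); // 9
    std::printf("lean 64-bit: %u\n", requiredRegisters(lean64));        // 4
}
```

The reallocating case then requires `GPRInfo::numberOfRegisters >= requiredRegisters`, and the inline-(re)allocation case requires two more on top of that, matching the two early returns in the hunk above.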
@@ -1484,11 +1518,10 @@ void AccessCase::generateWithGuard(
notInt.link(&jit);
#if USE(JSVALUE64)
jit.unboxDoubleWithoutAssertions(valueRegs.payloadGPR(), scratch2GPR, state.scratchFPR);
failAndRepatch.append(jit.branchIfNaN(state.scratchFPR));
#else
failAndRepatch.append(jit.branch32(CCallHelpers::Above, valueRegs.tagGPR(), CCallHelpers::TrustedImm32(JSValue::LowestTag)));
jit.unboxDouble(valueRegs, state.scratchFPR);
#endif
failAndRepatch.append(jit.branchIfNaN(state.scratchFPR));
ready.link(&jit);

jit.zeroExtend32ToWord(propertyGPR, scratch2GPR);
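Previously only the `USE(JSVALUE64)` branch rejected NaN after unboxing; moving `branchIfNaN` below the `#endif` makes the 32-bit path bail to the slow path on NaN as well. For reference, a scalar C++ analogue of what that check tests (a double is NaN exactly when it does not compare equal to itself); the helper name is hypothetical:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical helper, for illustration: reports whether a raw 64-bit payload
// decodes to NaN, the condition that now fails-and-repatches on both paths.
static bool isNaNBits(uint64_t bits)
{
    double d;
    std::memcpy(&d, &bits, sizeof d);
    return d != d; // true only for NaN
}

int main()
{
    std::printf("%d\n", isNaNBits(0x7FF8000000000000ull)); // quiet NaN -> 1
    std::printf("%d\n", isNaNBits(0x3FF0000000000000ull)); // 1.0 -> 0
}
```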
@@ -2250,7 +2283,7 @@ void AccessCase::generateImpl(AccessGenerationState& state)

case Transition: {
ASSERT(!viaProxy());
// AccessCase::transition() should have returned null if this wasn't true.
// AccessCase::createTransition() should have returned null if this wasn't true.
RELEASE_ASSERT(GPRInfo::numberOfRegisters >= 6 || !structure()->outOfLineCapacity() || structure()->outOfLineCapacity() == newStructure()->outOfLineCapacity());

// NOTE: This logic is duplicated in AccessCase::doesCalls(). It's important that doesCalls() knows
@@ -167,7 +167,7 @@ class AccessCase : public ThreadSafeRefCounted<AccessCase> {
Structure* = nullptr, const ObjectPropertyConditionSet& = ObjectPropertyConditionSet(), RefPtr<PolyProtoAccessChain>&& = nullptr);

static RefPtr<AccessCase> createTransition(VM&, JSCell* owner, CacheableIdentifier, PropertyOffset, Structure* oldStructure,
Structure* newStructure, const ObjectPropertyConditionSet&, RefPtr<PolyProtoAccessChain>&&);
Structure* newStructure, const ObjectPropertyConditionSet&, RefPtr<PolyProtoAccessChain>&&, const StructureStubInfo&);

static Ref<AccessCase> createDelete(VM&, JSCell* owner, CacheableIdentifier, PropertyOffset, Structure* oldStructure,
Structure* newStructure);
@@ -3141,8 +3141,19 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
Structure* structure = nullptr;
if (base.isNull())
structure = globalObject->nullPrototypeObjectStructure();
else if (base.isObject())
structure = m_vm.structureCache.emptyObjectStructureConcurrently(globalObject, base.getObject(), JSFinalObject::defaultInlineCapacity());
else if (base.isObject()) {
// Having a bad time clears the structureCache, and so it should invalidate this structure.
bool isHavingABadTime = globalObject->isHavingABadTime();
WTF::loadLoadFence();
if (!isHavingABadTime)
m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
// Normally, we would always install a watchpoint. In this case, however, if we haveABadTime, we
// still want to optimize. There is no watchpoint for that case though, so we need to make sure this load
// does not get hoisted above the check.
WTF::loadLoadFence();
structure = m_vm.structureCache
.emptyObjectStructureConcurrently(globalObject, base.getObject(), JSFinalObject::defaultInlineCapacity());
}

if (structure) {
m_state.setShouldTryConstantFolding(true);
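The pair of `loadLoadFence()` calls implements a check-then-read against state that a concurrent "having a bad time" transition can mutate: the first orders the flag load before the decision to register a watchpoint, and the second keeps the `structureCache` read from being hoisted above the flag check, which matters because the bad-time case proceeds without any watchpoint. A rough standalone analogue with C++ atomics; the names below are illustrative, not WTF or JSC API:

```cpp
#include <atomic>

// Illustrative stand-ins for the JSC state involved.
std::atomic<bool> havingABadTime { false };      // flips to true at most once
std::atomic<void*> cachedStructure { nullptr };  // cleared when the flag flips

void* readCachedStructure()
{
    bool bad = havingABadTime.load(std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_acquire); // ~ WTF::loadLoadFence()
    if (!bad) {
        // Register a havingABadTime watchpoint here so compiled code is
        // invalidated if the flag flips later (elided in this sketch).
    }
    // Even when bad == true the cache is still read (there is no watchpoint to
    // install for that case), so a second fence stops this load from being
    // hoisted above the flag check.
    std::atomic_thread_fence(std::memory_order_acquire);
    return cachedStructure.load(std::memory_order_relaxed);
}

int main() { return readCachedStructure() != nullptr; }
```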
@@ -6482,7 +6482,9 @@ void ByteCodeParser::parseBlock(unsigned limit)
FrozenValue* frozen = m_graph.freezeStrong(symbol);
addToGraph(CheckIsConstant, OpInfo(frozen), property);
} else if (auto* string = property->dynamicCastConstant<JSString*>(*m_vm)) {
if (auto* impl = string->tryGetValueImpl(); impl->isAtom() && !parseIndex(*const_cast<StringImpl*>(impl))) {
auto* impl = string->tryGetValueImpl();
ASSERT(impl); // FIXME: rdar://83902782
if (impl && impl->isAtom() && !parseIndex(*const_cast<StringImpl*>(impl))) {
uid = bitwise_cast<UniquedStringImpl*>(impl);
propertyCell = string;
m_graph.freezeStrong(string);
@@ -8864,7 +8866,9 @@ void ByteCodeParser::handlePutByVal(Bytecode bytecode, BytecodeIndex osrExitInde
FrozenValue* frozen = m_graph.freezeStrong(symbol);
addToGraph(CheckIsConstant, OpInfo(frozen), property);
} else if (auto* string = property->dynamicCastConstant<JSString*>(*m_vm)) {
if (auto* impl = string->tryGetValueImpl(); impl->isAtom() && !parseIndex(*const_cast<StringImpl*>(impl))) {
auto* impl = string->tryGetValueImpl();
ASSERT(impl); // FIXME: rdar://83902782
if (impl && impl->isAtom() && !parseIndex(*const_cast<StringImpl*>(impl))) {
uid = bitwise_cast<UniquedStringImpl*>(impl);
propertyCell = string;
m_graph.freezeStrong(string);
@@ -839,8 +839,19 @@ class ConstantFoldingPhase : public Phase {
Structure* structure = nullptr;
if (base.isNull())
structure = globalObject->nullPrototypeObjectStructure();
else if (base.isObject())
structure = globalObject->vm().structureCache.emptyObjectStructureConcurrently(globalObject, base.getObject(), JSFinalObject::defaultInlineCapacity());
else if (base.isObject()) {
// Having a bad time clears the structureCache, and so it should invalidate this structure.
bool isHavingABadTime = globalObject->isHavingABadTime();
WTF::loadLoadFence();
if (!isHavingABadTime)
m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
// Normally, we would always install a watchpoint. In this case, however, if we haveABadTime, we
// still want to optimize. There is no watchpoint for that case though, so we need to make sure this load
// does not get hoisted above the check.
WTF::loadLoadFence();
structure = globalObject->vm().structureCache
.emptyObjectStructureConcurrently(globalObject, base.getObject(), JSFinalObject::defaultInlineCapacity());
}

if (structure) {
node->convertToNewObject(m_graph.registerStructure(structure));
@@ -1843,10 +1843,12 @@ bool Graph::canDoFastSpread(Node* node, const AbstractValue& value)
if (!value.m_structure.isFinite())
return false;

ArrayPrototype* arrayPrototype = globalObjectFor(node->child1()->origin.semantic)->arrayPrototype();
JSGlobalObject* globalObject = globalObjectFor(node->child1()->origin.semantic);
ArrayPrototype* arrayPrototype = globalObject->arrayPrototype();
bool allGood = true;
value.m_structure.forEach([&] (RegisteredStructure structure) {
allGood &= structure->hasMonoProto()
allGood &= structure->globalObject() == globalObject
&& structure->hasMonoProto()
&& structure->storedPrototype() == arrayPrototype
&& !structure->isDictionary()
&& structure->getConcurrently(m_vm.propertyNames->iteratorSymbol.impl()) == invalidOffset
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2015-2020 Apple Inc. All rights reserved.
* Copyright (C) 2015-2021 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -228,8 +228,17 @@ class Relationship {
if (*this == other)
return true;

if (m_right->isInt32Constant() && other.m_right->isInt32Constant())
return (m_right->asInt32() + m_offset) == (other.m_right->asInt32() + other.m_offset);
if (m_right->isInt32Constant() && other.m_right->isInt32Constant()) {
int thisRight = m_right->asInt32();
int otherRight = other.m_right->asInt32();

if (sumOverflows<int>(thisRight, m_offset))
return false;
if (sumOverflows<int>(otherRight, other.m_offset))
return false;

return (thisRight + m_offset) == (otherRight + other.m_offset);
}
return false;
}
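`sameNodesAs` previously compared `right + offset` on both sides directly, which is undefined behaviour when either sum overflows `int`; the fix rejects such pairs up front. A standalone sketch of the same guard, using the GCC/Clang `__builtin_add_overflow` intrinsic as a stand-in for WTF's `sumOverflows<int>`:

```cpp
#include <climits>
#include <cstdio>

// Returns true only when both sums are representable and equal, mirroring the
// guarded comparison added to Relationship::sameNodesAs (illustrative, not JSC).
static bool offsetsMatch(int thisRight, int thisOffset, int otherRight, int otherOffset)
{
    int lhs, rhs;
    if (__builtin_add_overflow(thisRight, thisOffset, &lhs))
        return false;
    if (__builtin_add_overflow(otherRight, otherOffset, &rhs))
        return false;
    return lhs == rhs;
}

int main()
{
    std::printf("%d\n", offsetsMatch(1, 2, 2, 1));             // 1: 3 == 3
    std::printf("%d\n", offsetsMatch(INT_MAX, 1, INT_MAX, 1)); // 0: both sums overflow
}
```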

@@ -1391,7 +1400,25 @@ class IntegerRangeOptimizationPhase : public Phase {
case ArithAbs: {
if (node->child1().useKind() != Int32Use)
break;
setRelationship(Relationship(node, m_zero, Relationship::GreaterThan, -1));

// If ArithAbs cares about overflow, then INT32_MIN input will cause OSR exit.
// Thus we can safely say `x >= 0`.
if (shouldCheckOverflow(node->arithMode())) {
setRelationship(Relationship(node, m_zero, Relationship::GreaterThan, -1));
break;
}

// If ArithAbs does not care about overflow, it can return INT32_MIN if the input is INT32_MIN.
// If minValue is not INT32_MIN, we can still say it is `x >= 0`.
int minValue = std::numeric_limits<int>::min();
auto iter = m_relationships.find(node->child1().node());
if (iter != m_relationships.end()) {
for (Relationship relationship : iter->value)
minValue = std::max(minValue, relationship.minValueOfLeft());
}

if (minValue > std::numeric_limits<int>::min())
setRelationship(Relationship(node, m_zero, Relationship::GreaterThan, -1));
break;
}
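The special case exists because |INT32_MIN| is not representable as an int32: a checked ArithAbs OSR-exits on that input, so `x >= 0` always holds, while an unchecked one wraps back to INT32_MIN, which is why the relationship is only recorded when the input's known minimum is strictly greater than INT32_MIN. A small demonstration of the wrap, computed in 64-bit arithmetic to stay clear of undefined behaviour:

```cpp
#include <cstdint>
#include <cstdio>
#include <limits>

int main()
{
    const int32_t minValue = std::numeric_limits<int32_t>::min();  // -2147483648
    const int64_t wide = -static_cast<int64_t>(minValue);          // 2147483648, does not fit in int32
    // Two's-complement wrap back into int32 (well-defined since C++20).
    const int32_t wrapped = static_cast<int32_t>(static_cast<uint32_t>(wide));
    std::printf("%d %lld %d\n", minValue, static_cast<long long>(wide), wrapped);
    // prints: -2147483648 2147483648 -2147483648
    // So an unchecked ArithAbs can yield a negative result unless the input is
    // known to be strictly greater than INT32_MIN.
}
```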

@@ -2538,6 +2538,18 @@ JSC_DEFINE_JIT_OPERATION(operationEnumeratorInByVal, EncodedJSValue, (JSGlobalOb
RELEASE_AND_RETURN(scope, JSValue::encode(jsBoolean(CommonSlowPaths::opInByVal(globalObject, base, propertyName))));
}

JSC_DEFINE_JIT_OPERATION(operationEnumeratorGetByValGeneric, EncodedJSValue, (JSGlobalObject* globalObject, JSCell* baseCell, EncodedJSValue propertyNameValue, uint32_t index, int32_t modeNumber, JSPropertyNameEnumerator* enumerator))
{
VM& vm = globalObject->vm();
CallFrame* callFrame = DECLARE_CALL_FRAME(vm);
JITOperationPrologueCallFrameTracer tracer(vm, callFrame);
auto scope = DECLARE_THROW_SCOPE(vm);

JSValue property = JSValue::decode(propertyNameValue);
JSPropertyNameEnumerator::Mode mode = static_cast<JSPropertyNameEnumerator::Mode>(modeNumber);
RELEASE_AND_RETURN(scope, JSValue::encode(CommonSlowPaths::opEnumeratorGetByVal(globalObject, baseCell, property, index, mode, enumerator)));
}

JSC_DEFINE_JIT_OPERATION(operationEnumeratorHasOwnProperty, EncodedJSValue, (JSGlobalObject* globalObject, EncodedJSValue baseValue, EncodedJSValue propertyNameValue, uint32_t index, int32_t modeNumber))
{
VM& vm = globalObject->vm();
@@ -112,6 +112,7 @@ JSC_DECLARE_JIT_OPERATION(operationEnumeratorNextUpdatePropertyName, EncodedJSVa
JSC_DECLARE_JIT_OPERATION(operationEnumeratorInByVal, EncodedJSValue, (JSGlobalObject*, EncodedJSValue, EncodedJSValue, uint32_t, int32_t));
JSC_DECLARE_JIT_OPERATION(operationEnumeratorHasOwnProperty, EncodedJSValue, (JSGlobalObject*, EncodedJSValue, EncodedJSValue, uint32_t, int32_t));
JSC_DECLARE_JIT_OPERATION(operationEnumeratorRecoverNameAndGetByVal, EncodedJSValue, (JSGlobalObject*, JSCell*, uint32_t, JSPropertyNameEnumerator*));
JSC_DECLARE_JIT_OPERATION(operationEnumeratorGetByValGeneric, EncodedJSValue, (JSGlobalObject*, JSCell*, EncodedJSValue, uint32_t, int32_t, JSPropertyNameEnumerator*));

JSC_DECLARE_JIT_OPERATION(operationNewRegexpWithLastIndex, JSCell*, (JSGlobalObject*, JSCell*, EncodedJSValue));
JSC_DECLARE_JIT_OPERATION(operationNewArray, char*, (JSGlobalObject*, Structure*, void*, size_t));