Skip to content
Permalink
Browse files

[MERGE #6196 @atulkatti] ChakraCore servicing update for July, 2019

Merge pull request #6196 from atulkatti:servicing/1907

This release addresses the following issues:
CVE-2019-1001
CVE-2019-1062
CVE-2019-1092
CVE-2019-1103
CVE-2019-1106
CVE-2019-1107
• Loading branch information…
atulkatti committed Jul 9, 2019
2 parents ba1f445 + 12c31f0 commit 75162b7f2d8ac2b37d17564e9c979ba1bae707e8
@@ -1 +1 @@
1.11.10
1.11.11
@@ -4151,13 +4151,17 @@ BackwardPass::UpdateImplicitCallBailOutKind(IR::Instr *const instr, bool needsBa

IR::BailOutKind implicitCallBailOutKind = needsBailOutOnImplicitCall ? IR::BailOutOnImplicitCalls : IR::BailOutInvalid;

const IR::BailOutKind instrBailOutKind = instr->GetBailOutKind();
IR::BailOutKind instrBailOutKind = instr->GetBailOutKind();
if (instrBailOutKind & IR::BailOutMarkTempObject)
{
// Don't remove the implicit call pre op bailout for mark temp object
// Remove the mark temp object bit, as we don't need it after the dead store pass
instr->SetBailOutKind(instrBailOutKind & ~IR::BailOutMarkTempObject);
return true;
instrBailOutKind &= ~IR::BailOutMarkTempObject;
instr->SetBailOutKind(instrBailOutKind);

if (!instr->GetBailOutInfo()->canDeadStore)
{
return true;
}
}

const IR::BailOutKind instrImplicitCallBailOutKind = instrBailOutKind & ~IR::BailOutKindBits;
@@ -27,7 +27,7 @@ class BailOutInfo
BailOutInfo(uint32 bailOutOffset, Func* bailOutFunc) :
bailOutOffset(bailOutOffset), bailOutFunc(bailOutFunc),
byteCodeUpwardExposedUsed(nullptr), polymorphicCacheIndex((uint)-1), startCallCount(0), startCallInfo(nullptr), bailOutInstr(nullptr),
totalOutParamCount(0), argOutSyms(nullptr), bailOutRecord(nullptr), wasCloned(false), isInvertedBranch(false), sharedBailOutKind(true), isLoopTopBailOutInfo(false),
totalOutParamCount(0), argOutSyms(nullptr), bailOutRecord(nullptr), wasCloned(false), isInvertedBranch(false), sharedBailOutKind(true), isLoopTopBailOutInfo(false), canDeadStore(true),
outParamInlinedArgSlot(nullptr), liveVarSyms(nullptr), liveLosslessInt32Syms(nullptr), liveFloat64Syms(nullptr),
branchConditionOpnd(nullptr),
stackLiteralBailOutInfoCount(0), stackLiteralBailOutInfo(nullptr)
@@ -69,6 +69,7 @@ class BailOutInfo
#endif
bool wasCloned;
bool isInvertedBranch;
bool canDeadStore;
bool sharedBailOutKind;
bool isLoopTopBailOutInfo;

@@ -345,6 +345,9 @@ Func::Codegen(JitArenaAllocator *alloc, JITTimeWorkItem * workItem,
case RejitReason::TrackIntOverflowDisabled:
outputData->disableTrackCompoundedIntOverflow = TRUE;
break;
case RejitReason::MemOpDisabled:
outputData->disableMemOp = TRUE;
break;
default:
Assume(UNREACHED);
}
@@ -1124,6 +1127,12 @@ Func::IsTrackCompoundedIntOverflowDisabled() const
return (HasProfileInfo() && GetReadOnlyProfileInfo()->IsTrackCompoundedIntOverflowDisabled()) || m_output.IsTrackCompoundedIntOverflowDisabled();
}

bool
Func::IsMemOpDisabled() const
{
    // MemOp is considered disabled when either the read-only profile info
    // or the JIT output data reports it as disabled.
    if (HasProfileInfo() && GetReadOnlyProfileInfo()->IsMemOpDisabled())
    {
        return true;
    }
    return m_output.IsMemOpDisabled();
}

bool
Func::IsArrayCheckHoistDisabled() const
{
@@ -995,6 +995,7 @@ static const unsigned __int64 c_debugFillPattern8 = 0xcececececececece;
void SetScopeObjSym(StackSym * sym);
StackSym * GetScopeObjSym();
bool IsTrackCompoundedIntOverflowDisabled() const;
bool IsMemOpDisabled() const;
bool IsArrayCheckHoistDisabled() const;
bool IsStackArgOptDisabled() const;
bool IsSwitchOptDisabled() const;
@@ -2624,7 +2624,7 @@ GlobOpt::OptInstr(IR::Instr *&instr, bool* isInstrRemoved)
!(instr->IsJitProfilingInstr()) &&
this->currentBlock->loop && !IsLoopPrePass() &&
!func->IsJitInDebugMode() &&
(func->HasProfileInfo() && !func->GetReadOnlyProfileInfo()->IsMemOpDisabled()) &&
!func->IsMemOpDisabled() &&
this->currentBlock->loop->doMemOp)
{
CollectMemOpInfo(instrPrev, instr, src1Val, src2Val);
@@ -16531,6 +16531,7 @@ GlobOpt::GenerateBailOutMarkTempObjectIfNeeded(IR::Instr * instr, IR::Opnd * opn
if (instr->HasBailOutInfo())
{
instr->SetBailOutKind(instr->GetBailOutKind() | IR::BailOutMarkTempObject);
instr->GetBailOutInfo()->canDeadStore = false;
}
else
{
@@ -16540,6 +16541,11 @@ GlobOpt::GenerateBailOutMarkTempObjectIfNeeded(IR::Instr * instr, IR::Opnd * opn
|| (instr->m_opcode == Js::OpCode::FromVar && !opnd->GetValueType().IsPrimitive())
|| propertySymOpnd == nullptr
|| !propertySymOpnd->IsTypeCheckProtected())
{
this->GenerateBailAtOperation(&instr, IR::BailOutMarkTempObject);
instr->GetBailOutInfo()->canDeadStore = false;
}
else if (propertySymOpnd->MayHaveImplicitCall())
{
this->GenerateBailAtOperation(&instr, IR::BailOutMarkTempObject);
}
@@ -16680,7 +16686,14 @@ GlobOpt::GenerateInductionVariableChangeForMemOp(Loop *loop, byte unroll, IR::In
}
else
{
uint size = (loopCount->LoopCountMinusOneConstantValue() + 1) * unroll;
int32 loopCountMinusOnePlusOne;
int32 size;
if (Int32Math::Add(loopCount->LoopCountMinusOneConstantValue(), 1, &loopCountMinusOnePlusOne) ||
Int32Math::Mul(loopCountMinusOnePlusOne, unroll, &size))
{
throw Js::RejitException(RejitReason::MemOpDisabled);
}
Assert(size > 0);
sizeOpnd = IR::IntConstOpnd::New(size, IRType::TyUint32, localFunc);
}
loop->memOpInfo->inductionVariableOpndPerUnrollMap->Add(unroll, sizeOpnd);
@@ -974,7 +974,8 @@ GlobOptBlockData::MergeValueInfo(
fromDataValueInfo->AsArrayValueInfo(),
fromDataSym,
symsRequiringCompensation,
symsCreatedForMerge);
symsCreatedForMerge,
isLoopBackEdge);
}

// Consider: If both values are VarConstantValueInfo with the same value, we could
@@ -1072,7 +1073,8 @@ ValueInfo *GlobOptBlockData::MergeArrayValueInfo(
const ArrayValueInfo *const fromDataValueInfo,
Sym *const arraySym,
BVSparse<JitArenaAllocator> *const symsRequiringCompensation,
BVSparse<JitArenaAllocator> *const symsCreatedForMerge)
BVSparse<JitArenaAllocator> *const symsCreatedForMerge,
bool isLoopBackEdge)
{
Assert(mergedValueType.IsAnyOptimizedArray());
Assert(toDataValueInfo);
@@ -1095,7 +1097,7 @@ ValueInfo *GlobOptBlockData::MergeArrayValueInfo(
}
else
{
if (!this->globOpt->IsLoopPrePass())
if (!this->globOpt->IsLoopPrePass() && !isLoopBackEdge)
{
// Adding compensation code in the prepass won't help, as the symstores would again be different in the main pass.
Assert(symsRequiringCompensation);
@@ -1123,7 +1125,7 @@ ValueInfo *GlobOptBlockData::MergeArrayValueInfo(
}
else
{
if (!this->globOpt->IsLoopPrePass())
if (!this->globOpt->IsLoopPrePass() && !isLoopBackEdge)
{
Assert(symsRequiringCompensation);
symsRequiringCompensation->Set(arraySym->m_id);
@@ -1150,7 +1152,7 @@ ValueInfo *GlobOptBlockData::MergeArrayValueInfo(
}
else
{
if (!this->globOpt->IsLoopPrePass())
if (!this->globOpt->IsLoopPrePass() && !isLoopBackEdge)
{
Assert(symsRequiringCompensation);
symsRequiringCompensation->Set(arraySym->m_id);
@@ -264,7 +264,7 @@ class GlobOptBlockData
Value * MergeValues(Value *toDataValue, Value *fromDataValue, Sym *fromDataSym, bool isLoopBackEdge, BVSparse<JitArenaAllocator> *const symsRequiringCompensation, BVSparse<JitArenaAllocator> *const symsCreatedForMerge);
ValueInfo * MergeValueInfo(Value *toDataVal, Value *fromDataVal, Sym *fromDataSym, bool isLoopBackEdge, bool sameValueNumber, BVSparse<JitArenaAllocator> *const symsRequiringCompensation, BVSparse<JitArenaAllocator> *const symsCreatedForMerge);
JsTypeValueInfo * MergeJsTypeValueInfo(JsTypeValueInfo * toValueInfo, JsTypeValueInfo * fromValueInfo, bool isLoopBackEdge, bool sameValueNumber);
ValueInfo * MergeArrayValueInfo(const ValueType mergedValueType, const ArrayValueInfo *const toDataValueInfo, const ArrayValueInfo *const fromDataValueInfo, Sym *const arraySym, BVSparse<JitArenaAllocator> *const symsRequiringCompensation, BVSparse<JitArenaAllocator> *const symsCreatedForMerge);
ValueInfo * MergeArrayValueInfo(const ValueType mergedValueType, const ArrayValueInfo *const toDataValueInfo, const ArrayValueInfo *const fromDataValueInfo, Sym *const arraySym, BVSparse<JitArenaAllocator> *const symsRequiringCompensation, BVSparse<JitArenaAllocator> *const symsCreatedForMerge, bool isLoopBackEdge);

// Argument Tracking
public:
@@ -410,6 +410,14 @@ GlobOpt::ProcessFieldKills(IR::Instr *instr, BVSparse<JitArenaAllocator> *bv, bo
if (inGlobOpt)
{
KillObjectHeaderInlinedTypeSyms(this->currentBlock, false);
if (this->objectTypeSyms)
{
if (this->currentBlock->globOptData.maybeWrittenTypeSyms == nullptr)
{
this->currentBlock->globOptData.maybeWrittenTypeSyms = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
}
this->currentBlock->globOptData.maybeWrittenTypeSyms->Or(this->objectTypeSyms);
}
}

// fall through
@@ -65,6 +65,12 @@ JITOutput::IsTrackCompoundedIntOverflowDisabled() const
return m_outputData->disableTrackCompoundedIntOverflow != FALSE;
}

bool
JITOutput::IsMemOpDisabled() const
{
    // The IDL field is a 'boolean'; any value other than FALSE means
    // the MemOp optimization was disabled during JIT.
    return m_outputData->disableMemOp ? true : false;
}

bool
JITOutput::IsArrayCheckHoistDisabled() const
{
@@ -22,6 +22,7 @@ class JITOutput
void RecordXData(BYTE * xdata);
#endif
bool IsTrackCompoundedIntOverflowDisabled() const;
bool IsMemOpDisabled() const;
bool IsArrayCheckHoistDisabled() const;
bool IsStackArgOptDisabled() const;
bool IsSwitchOptDisabled() const;
@@ -1234,6 +1234,10 @@ NativeCodeGenerator::CodeGen(PageAllocator * pageAllocator, CodeGenWorkItem* wor
{
body->GetAnyDynamicProfileInfo()->DisableTrackCompoundedIntOverflow();
}
if (jitWriteData.disableMemOp)
{
body->GetAnyDynamicProfileInfo()->DisableMemOp();
}
}

if (jitWriteData.disableInlineApply)
@@ -962,7 +962,8 @@ PropertySymOpnd::IsObjectHeaderInlined() const
bool
PropertySymOpnd::ChangesObjectLayout() const
{
JITTypeHolder cachedType = this->IsMono() ? this->GetType() : this->GetFirstEquivalentType();
JITTypeHolder cachedType = this->HasInitialType() ? this->GetInitialType() :
this->IsMono() ? this->GetType() : this->GetFirstEquivalentType();

JITTypeHolder finalType = this->GetFinalType();

@@ -987,13 +988,11 @@ PropertySymOpnd::ChangesObjectLayout() const
// This is the case where the type transition actually occurs. (This is the only case that's detectable
// during the loop pre-pass, since final types are not in place yet.)

Assert(cachedType != nullptr && Js::DynamicType::Is(cachedType->GetTypeId()));

const JITTypeHandler * cachedTypeHandler = cachedType->GetTypeHandler();
const JITTypeHandler * initialTypeHandler = initialType->GetTypeHandler();

return cachedTypeHandler->GetInlineSlotCapacity() != initialTypeHandler->GetInlineSlotCapacity() ||
cachedTypeHandler->GetOffsetOfInlineSlots() != initialTypeHandler->GetOffsetOfInlineSlots();
// If no final type has been set in the forward pass, then we have no way of knowing how the object shape will evolve here.
// If the initial type is object-header-inlined, assume that the layout may change.
return initialTypeHandler->IsObjectHeaderInlinedTypeHandler();
}

return false;
@@ -1138,7 +1138,8 @@ class PropertySymOpnd sealed : public SymOpnd
// fall back on live cache. Similarly, for fixed method checks.
bool MayHaveImplicitCall() const
{
return !IsRootObjectNonConfigurableFieldLoad() && !UsesFixedValue() && (!IsTypeCheckSeqCandidate() || !IsTypeCheckProtected());
return !IsRootObjectNonConfigurableFieldLoad() && !UsesFixedValue() && (!IsTypeCheckSeqCandidate() || !IsTypeCheckProtected()
|| (IsLoadedFromProto() && NeedsWriteGuardTypeCheck()));
}

// Is the instruction involving this operand part of a type check sequence? This is different from IsObjTypeSpecOptimized
@@ -17,7 +17,7 @@
// ChakraCore version number definitions (used in ChakraCore binary metadata)
#define CHAKRA_CORE_MAJOR_VERSION 1
#define CHAKRA_CORE_MINOR_VERSION 11
#define CHAKRA_CORE_PATCH_VERSION 10
#define CHAKRA_CORE_PATCH_VERSION 11
#define CHAKRA_CORE_VERSION_RELEASE_QFE 0 // Redundant with PATCH_VERSION. Keep this value set to 0.

// -------------
@@ -838,37 +838,42 @@ typedef struct JITOutputIDL
boolean disableStackArgOpt;
boolean disableSwitchOpt;
boolean disableTrackCompoundedIntOverflow;
boolean isInPrereservedRegion;
boolean disableMemOp;

boolean isInPrereservedRegion;
boolean hasBailoutInstr;

boolean hasJittedStackClosure;
IDL_PAD1(0)

unsigned short pdataCount;
unsigned short xdataSize;

unsigned short argUsedForBranch;
IDL_PAD2(1)

int localVarSlotsOffset; // FunctionEntryPointInfo only

int localVarChangedOffset; // FunctionEntryPointInfo only
unsigned int frameHeight;


unsigned int codeSize;
unsigned int throwMapOffset;

unsigned int throwMapCount;
unsigned int inlineeFrameOffsetArrayOffset;
unsigned int inlineeFrameOffsetArrayCount;

unsigned int inlineeFrameOffsetArrayCount;
unsigned int propertyGuardCount;

unsigned int ctorCachesCount;
X64_PAD4(2)

#if TARGET_64
CHAKRA_PTR xdataAddr;
#elif defined(_M_ARM)
unsigned int xdataOffset;
#else
X86_PAD4(0)
X86_PAD4(3)
#endif
CHAKRA_PTR codeAddress;
CHAKRA_PTR thunkAddress;

0 comments on commit 75162b7

Please sign in to comment.
You can’t perform that action at this time.