
[1.11>master] [MERGE #6196 @atulkatti] ChakraCore servicing update for July, 2019

Merge pull request #6196 from atulkatti:servicing/1907

This release addresses the following issues:
CVE-2019-1001
CVE-2019-1062
CVE-2019-1092
CVE-2019-1103
CVE-2019-1106
CVE-2019-1107
atulkatti committed Jul 9, 2019
2 parents 5fa1ffe + 75162b7 commit f8a6b80b4e5f7caeb7d3a61fe6836ebf69cfff85
@@ -4369,6 +4369,59 @@ BackwardPass::TraceBlockUses(BasicBlock * block, bool isStart)

#endif

bool
BackwardPass::UpdateImplicitCallBailOutKind(IR::Instr *const instr, bool needsBailOutOnImplicitCall)
{
    Assert(instr);
    Assert(instr->HasBailOutInfo());

    IR::BailOutKind implicitCallBailOutKind = needsBailOutOnImplicitCall ? IR::BailOutOnImplicitCalls : IR::BailOutInvalid;

    IR::BailOutKind instrBailOutKind = instr->GetBailOutKind();
    if (instrBailOutKind & IR::BailOutMarkTempObject)
    {
        // Remove the mark temp object bit, as we don't need it after the dead store pass
        instrBailOutKind &= ~IR::BailOutMarkTempObject;
        instr->SetBailOutKind(instrBailOutKind);

        if (!instr->GetBailOutInfo()->canDeadStore)
        {
            return true;
        }
    }

    const IR::BailOutKind instrImplicitCallBailOutKind = instrBailOutKind & ~IR::BailOutKindBits;
    if (instrImplicitCallBailOutKind == IR::BailOutOnImplicitCallsPreOp)
    {
        if (needsBailOutOnImplicitCall)
        {
            implicitCallBailOutKind = IR::BailOutOnImplicitCallsPreOp;
        }
    }
    else if (instrImplicitCallBailOutKind != IR::BailOutOnImplicitCalls && instrImplicitCallBailOutKind != IR::BailOutInvalid)
    {
        // This bailout kind (the value of 'instrImplicitCallBailOutKind') must guarantee that implicit calls will not happen.
        // If it doesn't make such a guarantee, it must be possible to merge this bailout kind with an implicit call bailout
        // kind, and therefore it should be part of BailOutKindBits.
        Assert(!needsBailOutOnImplicitCall);
        return true;
    }

    if (instrImplicitCallBailOutKind == implicitCallBailOutKind)
    {
        return true;
    }

    const IR::BailOutKind newBailOutKind = instrBailOutKind - instrImplicitCallBailOutKind + implicitCallBailOutKind;
    if (newBailOutKind == IR::BailOutInvalid)
    {
        return false;
    }

    instr->SetBailOutKind(newBailOutKind);
    return true;
}
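
The kind update above relies on the implicit-call component occupying the low, non-flag bits of IR::BailOutKind, so one component can be swapped for another without disturbing the flag bits. A minimal standalone sketch of that swap (the enum values here are illustrative only; the real layout lives in ChakraCore's BailOutKind definitions):

    // Sketch: the low bits hold one "main" kind; BailOutKindBits masks the flag bits.
    enum BailOutKind : unsigned
    {
        BailOutInvalid = 0,
        BailOutOnImplicitCalls = 1,
        BailOutOnImplicitCallsPreOp = 2,
        BailOutMarkTempObject = 1u << 4,   // a flag bit
        BailOutKindBits = ~0u << 4         // mask of all flag bits
    };

    BailOutKind SwapImplicitCallComponent(BailOutKind instrKind, BailOutKind newComponent)
    {
        // Isolate the main (non-flag) component of the instruction's kind.
        BailOutKind oldComponent = BailOutKind(instrKind & ~BailOutKindBits);
        // Subtracting the old component and adding the new one leaves the flag bits intact.
        return BailOutKind(instrKind - oldComponent + newComponent);
    }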

bool
BackwardPass::ProcessNoImplicitCallUses(IR::Instr *const instr)
{
@@ -27,7 +27,7 @@ class BailOutInfo
BailOutInfo(uint32 bailOutOffset, Func* bailOutFunc) :
    bailOutOffset(bailOutOffset), bailOutFunc(bailOutFunc),
    byteCodeUpwardExposedUsed(nullptr), polymorphicCacheIndex((uint)-1), startCallCount(0), startCallInfo(nullptr), bailOutInstr(nullptr),
    totalOutParamCount(0), argOutSyms(nullptr), bailOutRecord(nullptr), wasCloned(false), isInvertedBranch(false), sharedBailOutKind(true), isLoopTopBailOutInfo(false),
    totalOutParamCount(0), argOutSyms(nullptr), bailOutRecord(nullptr), wasCloned(false), isInvertedBranch(false), sharedBailOutKind(true), isLoopTopBailOutInfo(false), canDeadStore(true),
    outParamInlinedArgSlot(nullptr), liveVarSyms(nullptr), liveLosslessInt32Syms(nullptr), liveFloat64Syms(nullptr),
    branchConditionOpnd(nullptr),
    stackLiteralBailOutInfoCount(0), stackLiteralBailOutInfo(nullptr),
@@ -107,6 +107,7 @@ class BailOutInfo
#endif
    bool wasCloned;
    bool isInvertedBranch;
    bool canDeadStore;
    bool sharedBailOutKind;
    bool isLoopTopBailOutInfo;

@@ -355,6 +355,9 @@ Func::Codegen(JitArenaAllocator *alloc, JITTimeWorkItem * workItem,
    case RejitReason::TrackIntOverflowDisabled:
        outputData->disableTrackCompoundedIntOverflow = TRUE;
        break;
    case RejitReason::MemOpDisabled:
        outputData->disableMemOp = TRUE;
        break;
    default:
        Assume(UNREACHED);
    }
@@ -1112,6 +1115,12 @@ Func::IsTrackCompoundedIntOverflowDisabled() const
    return (HasProfileInfo() && GetReadOnlyProfileInfo()->IsTrackCompoundedIntOverflowDisabled()) || m_output.IsTrackCompoundedIntOverflowDisabled();
}

bool
Func::IsMemOpDisabled() const
{
    return (HasProfileInfo() && GetReadOnlyProfileInfo()->IsMemOpDisabled()) || m_output.IsMemOpDisabled();
}

bool
Func::IsArrayCheckHoistDisabled() const
{
@@ -967,6 +967,7 @@ static const unsigned __int64 c_debugFillPattern8 = 0xcececececececece;
    bool HasStackSymForFormal(Js::ArgSlot formalsIndex);

    bool IsTrackCompoundedIntOverflowDisabled() const;
    bool IsMemOpDisabled() const;
    bool IsArrayCheckHoistDisabled() const;
    bool IsStackArgOptDisabled() const;
    bool IsSwitchOptDisabled() const;
@@ -2675,7 +2675,7 @@ GlobOpt::OptInstr(IR::Instr *&instr, bool* isInstrRemoved)
        !(instr->IsJitProfilingInstr()) &&
        this->currentBlock->loop && !IsLoopPrePass() &&
        !func->IsJitInDebugMode() &&
        (func->HasProfileInfo() && !func->GetReadOnlyProfileInfo()->IsMemOpDisabled()) &&
        !func->IsMemOpDisabled() &&
        this->currentBlock->loop->doMemOp)
    {
        CollectMemOpInfo(instrPrev, instr, src1Val, src2Val);
@@ -16864,6 +16864,7 @@ GlobOpt::GenerateBailOutMarkTempObjectIfNeeded(IR::Instr * instr, IR::Opnd * opn
    if (instr->HasBailOutInfo())
    {
        instr->SetBailOutKind(instr->GetBailOutKind() | IR::BailOutMarkTempObject);
        instr->GetBailOutInfo()->canDeadStore = false;
    }
    else
    {
@@ -16873,6 +16874,11 @@ GlobOpt::GenerateBailOutMarkTempObjectIfNeeded(IR::Instr * instr, IR::Opnd * opn
            || (instr->m_opcode == Js::OpCode::FromVar && !opnd->GetValueType().IsPrimitive())
            || propertySymOpnd == nullptr
            || !propertySymOpnd->IsTypeCheckProtected())
        {
            this->GenerateBailAtOperation(&instr, IR::BailOutMarkTempObject);
            instr->GetBailOutInfo()->canDeadStore = false;
        }
        else if (propertySymOpnd->MayHaveImplicitCall())
        {
            this->GenerateBailAtOperation(&instr, IR::BailOutMarkTempObject);
        }
@@ -17013,7 +17019,14 @@ GlobOpt::GenerateInductionVariableChangeForMemOp(Loop *loop, byte unroll, IR::In
    }
    else
    {
        uint size = (loopCount->LoopCountMinusOneConstantValue() + 1) * unroll;
        int32 loopCountMinusOnePlusOne;
        int32 size;
        if (Int32Math::Add(loopCount->LoopCountMinusOneConstantValue(), 1, &loopCountMinusOnePlusOne) ||
            Int32Math::Mul(loopCountMinusOnePlusOne, unroll, &size))
        {
            throw Js::RejitException(RejitReason::MemOpDisabled);
        }
        Assert(size > 0);
        sizeOpnd = IR::IntConstOpnd::New(size, IRType::TyUint32, localFunc);
    }
    loop->memOpInfo->inductionVariableOpndPerUnrollMap->Add(unroll, sizeOpnd);
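
The fix replaces the unchecked (loopCountMinusOne + 1) * unroll computation with overflow-checked arithmetic that rejits with memops disabled on overflow. A standalone sketch of the same guard, using compiler builtins as a stand-in for Int32Math, whose Add/Mul return true on overflow as the diff implies (ComputeMemOpSize is a hypothetical helper, not part of the commit):

    #include <cstdint>
    #include <stdexcept>

    // Compute (loopCountMinusOne + 1) * unroll, rejecting any int32 overflow.
    int32_t ComputeMemOpSize(int32_t loopCountMinusOne, int32_t unroll)
    {
        int32_t count, size;
        if (__builtin_add_overflow(loopCountMinusOne, 1, &count) ||
            __builtin_mul_overflow(count, unroll, &size))
        {
            // ChakraCore throws Js::RejitException(RejitReason::MemOpDisabled) here.
            throw std::overflow_error("memop size overflows int32");
        }
        return size;
    }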
@@ -974,7 +974,8 @@ GlobOptBlockData::MergeValueInfo(
            fromDataValueInfo->AsArrayValueInfo(),
            fromDataSym,
            symsRequiringCompensation,
            symsCreatedForMerge);
            symsCreatedForMerge,
            isLoopBackEdge);
    }

// Consider: If both values are VarConstantValueInfo with the same value, we could
@@ -1072,7 +1073,8 @@ ValueInfo *GlobOptBlockData::MergeArrayValueInfo(
    const ArrayValueInfo *const fromDataValueInfo,
    Sym *const arraySym,
    BVSparse<JitArenaAllocator> *const symsRequiringCompensation,
    BVSparse<JitArenaAllocator> *const symsCreatedForMerge)
    BVSparse<JitArenaAllocator> *const symsCreatedForMerge,
    bool isLoopBackEdge)
{
    Assert(mergedValueType.IsAnyOptimizedArray());
    Assert(toDataValueInfo);
@@ -1095,7 +1097,7 @@ ValueInfo *GlobOptBlockData::MergeArrayValueInfo(
    }
    else
    {
        if (!this->globOpt->IsLoopPrePass())
        if (!this->globOpt->IsLoopPrePass() && !isLoopBackEdge)
        {
            // Adding compensation code in the prepass won't help, as the symstores would again be different in the main pass.
            Assert(symsRequiringCompensation);
@@ -1123,7 +1125,7 @@ ValueInfo *GlobOptBlockData::MergeArrayValueInfo(
    }
    else
    {
        if (!this->globOpt->IsLoopPrePass())
        if (!this->globOpt->IsLoopPrePass() && !isLoopBackEdge)
        {
            Assert(symsRequiringCompensation);
            symsRequiringCompensation->Set(arraySym->m_id);
@@ -1150,7 +1152,7 @@ ValueInfo *GlobOptBlockData::MergeArrayValueInfo(
    }
    else
    {
        if (!this->globOpt->IsLoopPrePass())
        if (!this->globOpt->IsLoopPrePass() && !isLoopBackEdge)
        {
            Assert(symsRequiringCompensation);
            symsRequiringCompensation->Set(arraySym->m_id);
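
Each of the three merge sites above now applies the same guard: compensation is requested only when the merge can actually host the copy. A hypothetical helper expressing that predicate (illustrative name, not from the commit):

    // Compensation code cannot be inserted during the loop prepass (sym stores
    // change again in the main pass), nor on a loop back edge, where there is
    // no safe block in which to place the copy.
    static bool CanRequestCompensation(GlobOpt *globOpt, bool isLoopBackEdge)
    {
        return !globOpt->IsLoopPrePass() && !isLoopBackEdge;
    }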
@@ -264,7 +264,7 @@ class GlobOptBlockData
    Value * MergeValues(Value *toDataValue, Value *fromDataValue, Sym *fromDataSym, bool isLoopBackEdge, BVSparse<JitArenaAllocator> *const symsRequiringCompensation, BVSparse<JitArenaAllocator> *const symsCreatedForMerge);
    ValueInfo * MergeValueInfo(Value *toDataVal, Value *fromDataVal, Sym *fromDataSym, bool isLoopBackEdge, bool sameValueNumber, BVSparse<JitArenaAllocator> *const symsRequiringCompensation, BVSparse<JitArenaAllocator> *const symsCreatedForMerge);
    JsTypeValueInfo * MergeJsTypeValueInfo(JsTypeValueInfo * toValueInfo, JsTypeValueInfo * fromValueInfo, bool isLoopBackEdge, bool sameValueNumber);
    ValueInfo * MergeArrayValueInfo(const ValueType mergedValueType, const ArrayValueInfo *const toDataValueInfo, const ArrayValueInfo *const fromDataValueInfo, Sym *const arraySym, BVSparse<JitArenaAllocator> *const symsRequiringCompensation, BVSparse<JitArenaAllocator> *const symsCreatedForMerge);
    ValueInfo * MergeArrayValueInfo(const ValueType mergedValueType, const ArrayValueInfo *const toDataValueInfo, const ArrayValueInfo *const fromDataValueInfo, Sym *const arraySym, BVSparse<JitArenaAllocator> *const symsRequiringCompensation, BVSparse<JitArenaAllocator> *const symsCreatedForMerge, bool isLoopBackEdge);

// Argument Tracking
public:
@@ -409,6 +409,14 @@ GlobOpt::ProcessFieldKills(IR::Instr *instr, BVSparse<JitArenaAllocator> *bv, bo
    if (inGlobOpt)
    {
        KillObjectHeaderInlinedTypeSyms(this->currentBlock, false);
        if (this->objectTypeSyms)
        {
            if (this->currentBlock->globOptData.maybeWrittenTypeSyms == nullptr)
            {
                this->currentBlock->globOptData.maybeWrittenTypeSyms = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
            }
            this->currentBlock->globOptData.maybeWrittenTypeSyms->Or(this->objectTypeSyms);
        }
    }

// fall through
@@ -65,6 +65,12 @@ JITOutput::IsTrackCompoundedIntOverflowDisabled() const
    return m_outputData->disableTrackCompoundedIntOverflow != FALSE;
}

bool
JITOutput::IsMemOpDisabled() const
{
    return m_outputData->disableMemOp != FALSE;
}

bool
JITOutput::IsArrayCheckHoistDisabled() const
{
@@ -22,6 +22,7 @@ class JITOutput
    void RecordXData(BYTE * xdata);
#endif
    bool IsTrackCompoundedIntOverflowDisabled() const;
    bool IsMemOpDisabled() const;
    bool IsArrayCheckHoistDisabled() const;
    bool IsStackArgOptDisabled() const;
    bool IsSwitchOptDisabled() const;
@@ -1234,6 +1234,10 @@ NativeCodeGenerator::CodeGen(PageAllocator * pageAllocator, CodeGenWorkItem* wor
        {
            body->GetAnyDynamicProfileInfo()->DisableTrackCompoundedIntOverflow();
        }
        if (jitWriteData.disableMemOp)
        {
            body->GetAnyDynamicProfileInfo()->DisableMemOp();
        }
    }

    if (jitWriteData.disableInlineApply)
@@ -962,7 +962,8 @@ PropertySymOpnd::IsObjectHeaderInlined() const
bool
PropertySymOpnd::ChangesObjectLayout() const
{
    JITTypeHolder cachedType = this->IsMono() ? this->GetType() : this->GetFirstEquivalentType();
    JITTypeHolder cachedType = this->HasInitialType() ? this->GetInitialType() :
        this->IsMono() ? this->GetType() : this->GetFirstEquivalentType();

    JITTypeHolder finalType = this->GetFinalType();

@@ -987,13 +988,11 @@ PropertySymOpnd::ChangesObjectLayout() const
        // This is the case where the type transition actually occurs. (This is the only case that's detectable
        // during the loop pre-pass, since final types are not in place yet.)

        Assert(cachedType != nullptr && Js::DynamicType::Is(cachedType->GetTypeId()));

        const JITTypeHandler * cachedTypeHandler = cachedType->GetTypeHandler();
        const JITTypeHandler * initialTypeHandler = initialType->GetTypeHandler();

        return cachedTypeHandler->GetInlineSlotCapacity() != initialTypeHandler->GetInlineSlotCapacity() ||
            cachedTypeHandler->GetOffsetOfInlineSlots() != initialTypeHandler->GetOffsetOfInlineSlots();
        // If no final type has been set in the forward pass, then we have no way of knowing how the object shape will evolve here.
        // If the initial type is object-header-inlined, assume that the layout may change.
        return initialTypeHandler->IsObjectHeaderInlinedTypeHandler();
    }

    return false;
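
For context (an illustration of the concept, not code from the commit): an object-header-inlined type handler keeps the first few property slots inside the object itself, so transitioning away from such a type can relocate those slots, which is precisely a layout change:

    // Hypothetical layouts: header-inlined vs. standard (aux-slot) objects.
    struct HeaderInlinedObject { void *type; void *inlineSlot0; void *inlineSlot1; };
    struct StandardObject      { void *type; void **auxSlots; };
    // A transition from the first shape to the second moves inlineSlot0/1 out
    // of the object header, so ChangesObjectLayout() must conservatively
    // report a possible layout change when only the initial type is known.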
@@ -1162,7 +1162,8 @@ class PropertySymOpnd sealed : public SymOpnd
    // fall back on live cache. Similarly, for fixed method checks.
    bool MayHaveImplicitCall() const
    {
        return !IsRootObjectNonConfigurableFieldLoad() && !UsesFixedValue() && (!IsTypeCheckSeqCandidate() || !IsTypeCheckProtected());
        return !IsRootObjectNonConfigurableFieldLoad() && !UsesFixedValue() && (!IsTypeCheckSeqCandidate() || !IsTypeCheckProtected()
            || (IsLoadedFromProto() && NeedsWriteGuardTypeCheck()));
    }

    // Is the instruction involving this operand part of a type check sequence? This is different from IsObjTypeSpecOptimized
@@ -847,37 +847,42 @@ typedef struct JITOutputIDL
    boolean disableStackArgOpt;
    boolean disableSwitchOpt;
    boolean disableTrackCompoundedIntOverflow;
    boolean isInPrereservedRegion;
    boolean disableMemOp;

    boolean isInPrereservedRegion;
    boolean hasBailoutInstr;

    boolean hasJittedStackClosure;
    IDL_PAD1(0)

    unsigned short pdataCount;
    unsigned short xdataSize;

    unsigned short argUsedForBranch;
    IDL_PAD2(1)

    int localVarSlotsOffset; // FunctionEntryPointInfo only

    int localVarChangedOffset; // FunctionEntryPointInfo only
    unsigned int frameHeight;

    unsigned int codeSize;
    unsigned int throwMapOffset;

    unsigned int throwMapCount;
    unsigned int inlineeFrameOffsetArrayOffset;
    unsigned int inlineeFrameOffsetArrayCount;

    unsigned int inlineeFrameOffsetArrayCount;
    unsigned int propertyGuardCount;

    unsigned int ctorCachesCount;
    X64_PAD4(2)

#if TARGET_64
    CHAKRA_PTR xdataAddr;
#elif defined(_M_ARM)
    unsigned int xdataOffset;
#else
    X86_PAD4(0)
    X86_PAD4(3)
#endif
    CHAKRA_PTR codeAddress;
    CHAKRA_PTR thunkAddress;
