// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                               Importer                                    XX
XX                                                                           XX
XX      Imports the given method and converts it to semantic trees           XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "corexcep.h"
#define Verify(cond, msg) \
do \
{ \
if (!(cond)) \
{ \
verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
} \
} while (0)
#define VerifyOrReturn(cond, msg) \
do \
{ \
if (!(cond)) \
{ \
verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
return; \
} \
} while (0)
#define VerifyOrReturnSpeculative(cond, msg, speculative) \
do \
{ \
if (speculative) \
{ \
if (!(cond)) \
{ \
return false; \
} \
} \
else \
{ \
if (!(cond)) \
{ \
verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
return false; \
} \
} \
} while (0)
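// Illustrative usage sketch (not taken from the real verifier code): verification helpers
// further down use these macros roughly as follows, where 'tiActual' and 'tiDeclared' are
// hypothetical typeInfo values being compared:
//
//     VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, false), "type mismatch");
//
// On failure, Verify/VerifyOrReturn funnel into verRaiseVerifyExceptionIfNeeded, defined below.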
/*****************************************************************************/
void Compiler::impInit()
{
#ifdef DEBUG
impTreeList = nullptr;
impTreeLast = nullptr;
impInlinedCodeSize = 0;
#endif
}
/*****************************************************************************
*
* Pushes the given tree on the stack.
*/
void Compiler::impPushOnStack(GenTree* tree, typeInfo ti)
{
/* Check for overflow. If inlining, we may be using a bigger stack */
if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
(verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
{
BADCODE("stack overflow");
}
#ifdef DEBUG
// If we are pushing a struct, make certain we know the precise type!
if (tree->TypeGet() == TYP_STRUCT)
{
assert(ti.IsType(TI_STRUCT));
CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
assert(clsHnd != NO_CLASS_HANDLE);
}
if (tiVerificationNeeded && !ti.IsDead())
{
assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
// The ti type is consistent with the tree type.
//
// On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
// In the verification type system, we always transform "native int" to "TI_INT".
// Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
// attempts to do that have proved too difficult. Instead, we'll assume that in checks like this,
// when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
// method used in the last disjunct allows exactly this mismatch.
assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
NormaliseForStack(typeInfo(tree->TypeGet()))));
// If it is a struct type, make certain we normalized the primitive types
assert(!ti.IsType(TI_STRUCT) ||
info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
}
#if VERBOSE_VERIFY
if (VERBOSE && tiVerificationNeeded)
{
printf("\n");
printf(TI_DUMP_PADDING);
printf("About to push to stack: ");
ti.Dump();
}
#endif // VERBOSE_VERIFY
#endif // DEBUG
verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree;
if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
{
compLongUsed = true;
}
else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
{
compFloatingPointUsed = true;
}
}
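// Typical call pattern (illustrative only): importing "ldc.i4 42" pushes roughly
//
//     impPushOnStack(gtNewIconNode(42), typeInfo(TI_INT));
//
// impPushNullObjRefOnStack below does the same for the null object reference.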
inline void Compiler::impPushNullObjRefOnStack()
{
impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
}
// This method gets called when we run into unverifiable code
// (and we are verifying the method)
inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
DEBUGARG(unsigned line))
{
// Remember that the code is not verifiable
// Note that the method may yet pass canSkipMethodVerification(),
// and so the presence of unverifiable code may not be an issue.
tiIsVerifiableCode = FALSE;
#ifdef DEBUG
const char* tail = strrchr(file, '\\');
if (tail)
{
file = tail + 1;
}
if (JitConfig.JitBreakOnUnsafeCode())
{
assert(!"Unsafe code detected");
}
#endif
JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
msg, info.compFullName, impCurOpcName, impCurOpcOffs));
if (verNeedsVerification() || compIsForImportOnly())
{
JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
msg, info.compFullName, impCurOpcName, impCurOpcOffs));
verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
}
}
inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
DEBUGARG(unsigned line))
{
JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
msg, info.compFullName, impCurOpcName, impCurOpcOffs));
#ifdef DEBUG
// BreakIfDebuggerPresent();
if (getBreakOnBadCode())
{
assert(!"Typechecking error");
}
#endif
RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
UNREACHABLE();
}
// Helper function that tells us whether the IL instruction at the given address
// consumes an address from the top of the stack. We use it to avoid unnecessarily
// marking locals as address-taken (lvAddrTaken).
bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
{
assert(!compIsForInlining());
OPCODE opcode;
opcode = (OPCODE)getU1LittleEndian(codeAddr);
switch (opcode)
{
// case CEE_LDFLDA: We're leaving this one out because if you have a sequence
// like
//
// ldloca.0
// ldflda whatever
//
// on a primitive-like struct, you end up after morphing with the address of a
// local that's not marked as address-taken, which is wrong. Also, ldflda is
// usually used for structs that contain other structs, which isn't a case we
// handle very well right now for other reasons.
case CEE_LDFLD:
{
// We won't collapse small fields. This is probably not the right place to have this
// check, but we're only using the function for this purpose, and it is easy to factor
// it out if we need to do so.
CORINFO_RESOLVED_TOKEN resolvedToken;
impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
CORINFO_CLASS_HANDLE clsHnd;
var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
// Preserve 'small' int types
if (!varTypeIsSmall(lclTyp))
{
lclTyp = genActualType(lclTyp);
}
if (varTypeIsSmall(lclTyp))
{
return false;
}
return true;
}
default:
break;
}
return false;
}
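// Illustrative IL (assumed sequence, not from any particular method):
//
//     ldloca.s   V_0
//     ldfld      int32 SomeStruct::intField     // SomeStruct/intField are hypothetical
//
// With 'codeAddr' pointing at the ldfld, impILConsumesAddr returns true for a non-small
// field type such as int32 (so the preceding ldloca need not mark V_0 as address-taken),
// and false for small field types such as int8 or int16.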
void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
{
pResolvedToken->tokenContext = impTokenLookupContextHandle;
pResolvedToken->tokenScope = info.compScopeHnd;
pResolvedToken->token = getU4LittleEndian(addr);
pResolvedToken->tokenType = kind;
if (!tiVerificationNeeded)
{
info.compCompHnd->resolveToken(pResolvedToken);
}
else
{
Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
}
}
/*****************************************************************************
*
* Pop one tree from the stack.
*/
StackEntry Compiler::impPopStack()
{
if (verCurrentState.esStackDepth == 0)
{
BADCODE("stack underflow");
}
#ifdef DEBUG
#if VERBOSE_VERIFY
if (VERBOSE && tiVerificationNeeded)
{
JITDUMP("\n");
printf(TI_DUMP_PADDING);
printf("About to pop from the stack: ");
const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
ti.Dump();
}
#endif // VERBOSE_VERIFY
#endif // DEBUG
return verCurrentState.esStack[--verCurrentState.esStackDepth];
}
/*****************************************************************************
*
* Peek at the n'th (0-based) tree from the top of the stack.
*/
StackEntry& Compiler::impStackTop(unsigned n)
{
if (verCurrentState.esStackDepth <= n)
{
BADCODE("stack underflow");
}
return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
}
unsigned Compiler::impStackHeight()
{
return verCurrentState.esStackDepth;
}
/*****************************************************************************
* Some trees are spilled in a special way and need corresponding handling when
* they are unspilled or copied. This function enumerates the operators that
* are possible after spilling.
*/
#ifdef DEBUG // only used in asserts
static bool impValidSpilledStackEntry(GenTree* tree)
{
if (tree->gtOper == GT_LCL_VAR)
{
return true;
}
if (tree->OperIsConst())
{
return true;
}
return false;
}
#endif
/*****************************************************************************
*
* The following logic is used to save/restore stack contents.
* If 'copy' is true, then we make a copy of the trees on the stack. These
* have to all be cloneable/spilled values.
*/
void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
{
savePtr->ssDepth = verCurrentState.esStackDepth;
if (verCurrentState.esStackDepth)
{
savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
if (copy)
{
StackEntry* table = savePtr->ssTrees;
/* Make a fresh copy of all the stack entries */
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
{
table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
GenTree* tree = verCurrentState.esStack[level].val;
assert(impValidSpilledStackEntry(tree));
switch (tree->gtOper)
{
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_LCL_VAR:
table->val = gtCloneExpr(tree);
break;
default:
assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
break;
}
}
}
else
{
memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
}
}
}
void Compiler::impRestoreStackState(SavedStack* savePtr)
{
verCurrentState.esStackDepth = savePtr->ssDepth;
if (verCurrentState.esStackDepth)
{
memcpy(verCurrentState.esStack, savePtr->ssTrees,
verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
}
}
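// Minimal usage sketch (hypothetical caller, for illustration only): code that needs to
// re-import the same stack contents later saves the state and restores it afterwards:
//
//     SavedStack blockState;
//     impSaveStackState(&blockState, true); // copy: entries must be constants or lclVars
//     // ... import something that consumes or clobbers the stack ...
//     impRestoreStackState(&blockState);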
/*****************************************************************************
*
* Get the tree list started for a new basic block.
*/
inline void Compiler::impBeginTreeList()
{
assert(impTreeList == nullptr && impTreeLast == nullptr);
impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
}
/*****************************************************************************
*
* Store the given start and end stmt in the given basic block. This is
* mostly called by impEndTreeList(BasicBlock *block). It is called
* directly only for handling CEE_LEAVEs out of finally-protected try regions.
*/
inline void Compiler::impEndTreeList(BasicBlock* block, GenTree* firstStmt, GenTree* lastStmt)
{
assert(firstStmt->gtOper == GT_STMT);
assert(lastStmt->gtOper == GT_STMT);
/* Make the list circular, so that we can easily walk it backwards */
firstStmt->gtPrev = lastStmt;
/* Store the tree list in the basic block */
block->bbTreeList = firstStmt;
/* The block should not already be marked as imported */
assert((block->bbFlags & BBF_IMPORTED) == 0);
block->bbFlags |= BBF_IMPORTED;
}
/*****************************************************************************
*
* Store the current tree list in the given basic block.
*/
inline void Compiler::impEndTreeList(BasicBlock* block)
{
assert(impTreeList->gtOper == GT_BEG_STMTS);
GenTree* firstTree = impTreeList->gtNext;
if (!firstTree)
{
/* The block should not already be marked as imported */
assert((block->bbFlags & BBF_IMPORTED) == 0);
// Empty block. Just mark it as imported
block->bbFlags |= BBF_IMPORTED;
}
else
{
// Ignore the GT_BEG_STMTS
assert(firstTree->gtPrev == impTreeList);
impEndTreeList(block, firstTree, impTreeLast);
}
#ifdef DEBUG
if (impLastILoffsStmt != nullptr)
{
impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
impLastILoffsStmt = nullptr;
}
impTreeList = impTreeLast = nullptr;
#endif
}
/*****************************************************************************
*
* Check that storing the given tree doesn't mess up the semantic order. Note
* that this has only limited value as we can only check [0..chkLevel).
*/
inline void Compiler::impAppendStmtCheck(GenTree* stmt, unsigned chkLevel)
{
#ifndef DEBUG
return;
#else
assert(stmt->gtOper == GT_STMT);
if (chkLevel == (unsigned)CHECK_SPILL_ALL)
{
chkLevel = verCurrentState.esStackDepth;
}
if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
{
return;
}
GenTree* tree = stmt->gtStmt.gtStmtExpr;
// Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack
if (tree->gtFlags & GTF_CALL)
{
for (unsigned level = 0; level < chkLevel; level++)
{
assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
}
}
if (tree->gtOper == GT_ASG)
{
// For an assignment to a local variable, all references to that
// variable have to be spilled. If it is aliased, all calls and
// indirect accesses have to be spilled
if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
{
unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
for (unsigned level = 0; level < chkLevel; level++)
{
assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
assert(!lvaTable[lclNum].lvAddrExposed ||
(verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
}
}
// If the access may be to global memory, all side effects have to be spilled.
else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
{
for (unsigned level = 0; level < chkLevel; level++)
{
assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
}
}
}
#endif
}
/*****************************************************************************
*
* Append the given GT_STMT node to the current block's tree list.
* [0..chkLevel) is the portion of the stack which we will check for
* interference with stmt and spill if needed.
*/
inline void Compiler::impAppendStmt(GenTree* stmt, unsigned chkLevel)
{
assert(stmt->gtOper == GT_STMT);
noway_assert(impTreeLast != nullptr);
/* If the statement being appended has any side-effects, check the stack
to see if anything needs to be spilled to preserve correct ordering. */
GenTree* expr = stmt->gtStmt.gtStmtExpr;
unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
// Assignments to (unaliased) locals don't count as a side effect as
// we handle them specially using impSpillLclRefs(). Temp locals should
// be fine too.
if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
!(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
{
unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
assert(flags == (op2Flags | GTF_ASG));
flags = op2Flags;
}
if (chkLevel == (unsigned)CHECK_SPILL_ALL)
{
chkLevel = verCurrentState.esStackDepth;
}
if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
{
assert(chkLevel <= verCurrentState.esStackDepth);
if (flags)
{
// If there is a call, we have to spill global refs
bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
if (expr->gtOper == GT_ASG)
{
GenTree* lhs = expr->gtGetOp1();
// If we are assigning to a global ref, we have to spill global refs on stack.
// TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
// GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
// revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
if (!expr->OperIsBlkOp())
{
// If we are assigning to a global ref, we have to spill global refs on stack
if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
{
spillGlobEffects = true;
}
}
else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
((lhs->OperGet() == GT_LCL_VAR) &&
(lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
{
spillGlobEffects = true;
}
}
impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
}
else
{
impSpillSpecialSideEff();
}
}
impAppendStmtCheck(stmt, chkLevel);
/* Point 'prev' at the previous node, so that we can walk backwards */
stmt->gtPrev = impTreeLast;
/* Append the expression statement to the list */
impTreeLast->gtNext = stmt;
impTreeLast = stmt;
#ifdef FEATURE_SIMD
impMarkContiguousSIMDFieldAssignments(stmt);
#endif
/* Once we set impCurStmtOffs in an appended tree, we are ready to
report the following offsets. So reset impCurStmtOffs */
if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
{
impCurStmtOffsSet(BAD_IL_OFFSET);
}
#ifdef DEBUG
if (impLastILoffsStmt == nullptr)
{
impLastILoffsStmt = stmt;
}
if (verbose)
{
printf("\n\n");
gtDispTree(stmt);
}
#endif
}
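// Illustrative example (hypothetical trees): if the statement being appended is a call,
// e.g. GT_CALL Foo(), and the stack still holds an entry with GTF_GLOB_REF (say an IND of
// a static field), that entry is spilled to a temp first so its value is captured before
// the call, preserving the original evaluation order.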
/*****************************************************************************
*
* Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
*/
inline void Compiler::impInsertStmtBefore(GenTree* stmt, GenTree* stmtBefore)
{
assert(stmt->gtOper == GT_STMT);
assert(stmtBefore->gtOper == GT_STMT);
GenTree* stmtPrev = stmtBefore->gtPrev;
stmt->gtPrev = stmtPrev;
stmt->gtNext = stmtBefore;
stmtPrev->gtNext = stmt;
stmtBefore->gtPrev = stmt;
}
/*****************************************************************************
*
* Append the given expression tree to the current block's tree list.
* Return the newly created statement.
*/
GenTree* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, IL_OFFSETX offset)
{
assert(tree);
/* Allocate an 'expression statement' node */
GenTree* expr = gtNewStmt(tree, offset);
/* Append the statement to the current block's stmt list */
impAppendStmt(expr, chkLevel);
return expr;
}
/*****************************************************************************
*
* Insert the given expression tree before GT_STMT "stmtBefore"
*/
void Compiler::impInsertTreeBefore(GenTree* tree, IL_OFFSETX offset, GenTree* stmtBefore)
{
assert(stmtBefore->gtOper == GT_STMT);
/* Allocate an 'expression statement' node */
GenTree* expr = gtNewStmt(tree, offset);
/* Append the statement to the current block's stmt list */
impInsertStmtBefore(expr, stmtBefore);
}
/*****************************************************************************
*
* Append an assignment of the given value to a temp to the current tree list.
* curLevel is the stack level for which the spill to the temp is being done.
*/
void Compiler::impAssignTempGen(unsigned tmp,
GenTree* val,
unsigned curLevel,
GenTree** pAfterStmt, /* = NULL */
IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
BasicBlock* block /* = NULL */
)
{
GenTree* asg = gtNewTempAssign(tmp, val);
if (!asg->IsNothingNode())
{
if (pAfterStmt)
{
GenTree* asgStmt = gtNewStmt(asg, ilOffset);
*pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
}
else
{
impAppendTree(asg, curLevel, impCurStmtOffs);
}
}
}
/*****************************************************************************
* Same as above, but also handles the value-class case.
*/
void Compiler::impAssignTempGen(unsigned tmpNum,
GenTree* val,
CORINFO_CLASS_HANDLE structType,
unsigned curLevel,
GenTree** pAfterStmt, /* = NULL */
IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
BasicBlock* block /* = NULL */
)
{
GenTree* asg;
if (varTypeIsStruct(val))
{
assert(tmpNum < lvaCount);
assert(structType != NO_CLASS_HANDLE);
// If the method is non-verifiable the assert below may not hold, so at least
// ignore it when verification is turned on, since any block that tries to use
// the temp would have failed verification.
var_types varType = lvaTable[tmpNum].lvType;
assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
lvaSetStruct(tmpNum, structType, false);
// Now, set the type of the struct value. Note that lvaSetStruct may modify the type
// of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
// that has been passed in for the value being assigned to the temp, in which case we
// need to set 'val' to that same type.
// Note also that if we always normalized the types of any node that might be a struct
// type, this would not be necessary - but that requires additional JIT/EE interface
// calls that may not actually be required - e.g. if we only access a field of a struct.
val->gtType = lvaTable[tmpNum].lvType;
GenTree* dst = gtNewLclvNode(tmpNum, val->gtType);
asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
}
else
{
asg = gtNewTempAssign(tmpNum, val);
}
if (!asg->IsNothingNode())
{
if (pAfterStmt)
{
GenTree* asgStmt = gtNewStmt(asg, ilOffset);
*pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
}
else
{
impAppendTree(asg, curLevel, impCurStmtOffs);
}
}
}
/*****************************************************************************
*
* Pop the given number of values from the stack and return a list node with
* their values.
* The 'prefixTree' argument may optionally contain an argument
* list that is prepended to the list returned from this function.
*
* The notion of prepended is a bit misleading in that the list is backwards
* from the way I would expect: The first element popped is at the end of
* the returned list, and prefixTree is 'before' that, meaning closer to
* the end of the list. To get to prefixTree, you have to walk to the
* end of the list.
*
* For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
* such we reverse its meaning such that returnValue has a reversed
* prefixTree at the head of the list.
*/
GenTreeArgList* Compiler::impPopList(unsigned count, CORINFO_SIG_INFO* sig, GenTreeArgList* prefixTree)
{
assert(sig == nullptr || count == sig->numArgs);
CORINFO_CLASS_HANDLE structType;
GenTreeArgList* treeList;
if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
{
treeList = nullptr;
}
else
{ // ARG_ORDER_L2R
treeList = prefixTree;
}
while (count--)
{
StackEntry se = impPopStack();
typeInfo ti = se.seTypeInfo;
GenTree* temp = se.val;
if (varTypeIsStruct(temp))
{
// Morph trees that aren't already OBJs or MKREFANY to be OBJs
assert(ti.IsType(TI_STRUCT));
structType = ti.GetClassHandleForValueClass();
#ifdef DEBUG
if (verbose)
{
printf("Calling impNormStructVal on:\n");
gtDispTree(temp);
}
#endif
temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
#ifdef DEBUG
if (verbose)
{
printf("resulting tree:\n");
gtDispTree(temp);
}
#endif
}
/* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
treeList = gtNewListNode(temp, treeList);
}
if (sig != nullptr)
{
if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
{
// Make sure that all valuetypes (including enums) that we push are loaded.
// This is to guarantee that if a GC is triggered from the prestub of this method,
// all valuetypes in the method signature are already loaded.
// We need to be able to find the size of the valuetypes, but we cannot
// do a class-load from within GC.
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
}
CORINFO_ARG_LIST_HANDLE argLst = sig->args;
CORINFO_CLASS_HANDLE argClass;
CORINFO_CLASS_HANDLE argRealClass;
GenTreeArgList* args;
for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
{
PREFIX_ASSUME(args != nullptr);
CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
// insert implied casts (from float to double or double to float)
if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
{
args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), false, TYP_DOUBLE);
}
else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
{
args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), false, TYP_FLOAT);
}
// insert any widening or narrowing casts for backwards compatibility
args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
{
// Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett
// JIT, but it stopped working in Whidbey when we started passing simple valuetypes as their
// underlying primitive types.
// We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
// details).
if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
{
args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
}
// Make sure that all valuetypes (including enums) that we push are loaded.
// This is to guarantee that if a GC is triggered from the prestub of this method,
// all valuetypes in the method signature are already loaded.
// We need to be able to find the size of the valuetypes, but we cannot
// do a class-load from within GC.
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
}
argLst = info.compCompHnd->getArgNext(argLst);
}
}
if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
{
// Prepend the prefixTree
// Simple in-place reversal to place treeList
// at the end of a reversed prefixTree
while (prefixTree != nullptr)
{
GenTreeArgList* next = prefixTree->Rest();
prefixTree->Rest() = treeList;
treeList = prefixTree;
prefixTree = next;
}
}
return treeList;
}
/*****************************************************************************
*
* Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
* The first "skipReverseCount" items are not reversed.
*/
GenTreeArgList* Compiler::impPopRevList(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
{
assert(skipReverseCount <= count);
GenTreeArgList* list = impPopList(count, sig);
// reverse the list
if (list == nullptr || skipReverseCount == count)
{
return list;
}
GenTreeArgList* ptr = nullptr; // Initialized to the first node that needs to be reversed
GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
if (skipReverseCount == 0)
{
ptr = list;
}
else
{
lastSkipNode = list;
// Get to the first node that needs to be reversed
for (unsigned i = 0; i < skipReverseCount - 1; i++)
{
lastSkipNode = lastSkipNode->Rest();
}
PREFIX_ASSUME(lastSkipNode != nullptr);
ptr = lastSkipNode->Rest();
}
GenTreeArgList* reversedList = nullptr;
do
{
GenTreeArgList* tmp = ptr->Rest();
ptr->Rest() = reversedList;
reversedList = ptr;
ptr = tmp;
} while (ptr != nullptr);
if (skipReverseCount)
{
lastSkipNode->Rest() = reversedList;
return list;
}
else
{
return reversedList;
}
}
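// Worked example (illustrative, assuming no prefixTree): with values pushed in the order
// a, b, c, d (d on top), impPopList returns the list [a, b, c, d] -- the first value
// popped (d) ends up at the tail, as described above impPopList. Calling impPopRevList
// with skipReverseCount == 1 keeps the head node in place and reverses the remainder,
// yielding [a, d, c, b].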
/*****************************************************************************
Assign (copy) the structure from 'src' to 'dest'. The structure is a value
class of type 'clsHnd'. It returns the tree that should be appended to the
statement list that represents the assignment.
Temp assignments may be appended to impTreeList if spilling is necessary.
curLevel is the stack level for which a spill may be being done.
*/
GenTree* Compiler::impAssignStruct(GenTree* dest,
GenTree* src,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
GenTree** pAfterStmt, /* = NULL */
BasicBlock* block /* = NULL */
)
{
assert(varTypeIsStruct(dest));
while (dest->gtOper == GT_COMMA)
{
assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
// Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
if (pAfterStmt)
{
*pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
}
else
{
impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
}
// set dest to the second thing
dest = dest->gtOp.gtOp2;
}
assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
{
// Make this a NOP
return gtNewNothingNode();
}
// TODO-1stClassStructs: Avoid creating an address if it is not needed,
// or re-creating a Blk node if it is.
GenTree* destAddr;
if (dest->gtOper == GT_IND || dest->OperIsBlk())
{
destAddr = dest->gtOp.gtOp1;
}
else
{
destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
}
return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
}
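// Illustrative example (tree shapes only, not an exact IR dump): if 'dest' arrives as
// GT_COMMA(sideEffect, GT_OBJ(addr)), the loop above appends 'sideEffect' as its own
// statement and then uses the GT_OBJ as the destination, so impAssignStructPtr below
// only ever sees a simple destination address.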
/*****************************************************************************/
GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
GenTree* src,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
GenTree** pAfterStmt, /* = NULL */
BasicBlock* block /* = NULL */
)
{
var_types destType;
GenTree* dest = nullptr;
unsigned destFlags = 0;
#if defined(UNIX_AMD64_ABI)
assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
// TODO-ARM-BUG: Does ARM need this?
// TODO-ARM64-BUG: Does ARM64 need this?
assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
(src->TypeGet() != TYP_STRUCT &&
(GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
#else // !defined(UNIX_AMD64_ABI)
assert(varTypeIsStruct(src));
assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
src->gtOper == GT_COMMA ||
(src->TypeGet() != TYP_STRUCT &&
(GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
#endif // !defined(UNIX_AMD64_ABI)
if (destAddr->OperGet() == GT_ADDR)
{
GenTree* destNode = destAddr->gtGetOp1();
// If the actual destination is a local, or already a block node, or is a node that
// will be morphed, don't insert an OBJ(ADDR).
if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk() ||
((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet())))
{
dest = destNode;
}
destType = destNode->TypeGet();
}
else
{
destType = src->TypeGet();
}
var_types asgType = src->TypeGet();
if (src->gtOper == GT_CALL)
{
if (src->AsCall()->TreatAsHasRetBufArg(this))
{
// Case of call returning a struct via hidden retbuf arg
// insert the return value buffer into the argument list as first byref parameter
src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
// now returns void, not a struct
src->gtType = TYP_VOID;
// return the morphed call node
return src;
}
else
{
// Case of call returning a struct in one or more registers.
var_types returnType = (var_types)src->gtCall.gtReturnType;
// We won't use a return buffer, so change the type of src->gtType to 'returnType'
src->gtType = genActualType(returnType);
// First we try to change this to "LclVar/LclFld = call"
//
if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
{
// If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
// That is, the IR will be of the form lclVar = call for multi-reg return
//
GenTree* lcl = destAddr->gtOp.gtOp1;
if (src->AsCall()->HasMultiRegRetVal())
{
// Mark the struct LclVar as used in a MultiReg return context
// which currently makes it non promotable.
// TODO-1stClassStructs: Eliminate this pessimization when we can more generally
// handle multireg returns.
lcl->gtFlags |= GTF_DONT_CSE;
lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
}
else // The call result is not a multireg return
{
// We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
lcl->ChangeOper(GT_LCL_FLD);
fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
lcl->gtType = src->gtType;
asgType = src->gtType;
}
dest = lcl;
#if defined(_TARGET_ARM_)
// TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
// but that method has not been updated to include ARM.
impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
lcl->gtFlags |= GTF_DONT_CSE;
#elif defined(UNIX_AMD64_ABI)
// Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
// Make the struct non promotable. The eightbytes could contain multiple fields.
// TODO-1stClassStructs: Eliminate this pessimization when we can more generally
// handle multireg returns.
// TODO-Cleanup: Why is this needed here? This seems that it will set this even for
// non-multireg returns.
lcl->gtFlags |= GTF_DONT_CSE;
lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
#endif
}
else // we don't have a GT_ADDR of a GT_LCL_VAR
{
// !!! The destination could be on stack. !!!
// This flag will let us choose the correct write barrier.
asgType = returnType;
destFlags = GTF_IND_TGTANYWHERE;
}
}
}
else if (src->gtOper == GT_RET_EXPR)
{
GenTreeCall* call = src->gtRetExpr.gtInlineCandidate->AsCall();
noway_assert(call->gtOper == GT_CALL);
if (call->HasRetBufArg())
{
// insert the return value buffer into the argument list as first byref parameter
call->gtCallArgs = gtNewListNode(destAddr, call->gtCallArgs);
// now returns void, not a struct
src->gtType = TYP_VOID;
call->gtType = TYP_VOID;
// We already have appended the write to 'dest' GT_CALL's args
// So now we just return an empty node (pruning the GT_RET_EXPR)
return src;
}
else
{
// Case of inline method returning a struct in one or more registers.
//
var_types returnType = (var_types)call->gtReturnType;
// We won't need a return buffer
asgType = returnType;
src->gtType = genActualType(returnType);
call->gtType = src->gtType;
// If we've changed the type, and it no longer matches a local destination,
// we must use an indirection.
if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
{
dest = nullptr;
}
// !!! The destination could be on stack. !!!
// This flag will let us choose the correct write barrier.
destFlags = GTF_IND_TGTANYWHERE;
}
}
else if (src->OperIsBlk())
{
asgType = impNormStructType(structHnd);
if (src->gtOper == GT_OBJ)
{
assert(src->gtObj.gtClass == structHnd);
}
}
else if (src->gtOper == GT_INDEX)
{
asgType = impNormStructType(structHnd);
assert(src->gtIndex.gtStructElemClass == structHnd);
}
else if (src->gtOper == GT_MKREFANY)
{
// Since we are assigning the result of a GT_MKREFANY,
// "destAddr" must point to a refany.
GenTree* destAddrClone;
destAddr =
impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0);
assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
GenTree* ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL);
typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
GenTree* typeSlot =
gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
// append the assign of the pointer value
GenTree* asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
if (pAfterStmt)
{
*pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
}
else
{
impAppendTree(asg, curLevel, impCurStmtOffs);
}
// return the assign of the type value, to be appended
return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
}
else if (src->gtOper == GT_COMMA)
{
// The second thing is the struct or its address.
assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
if (pAfterStmt)
{
*pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
}
else
{
impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
}
// Evaluate the second thing using recursion.
return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
}
else if (src->IsLocal())
{
asgType = src->TypeGet();
}
else if (asgType == TYP_STRUCT)
{
asgType = impNormStructType(structHnd);
src->gtType = asgType;
}
if (dest == nullptr)
{
// TODO-1stClassStructs: We shouldn't really need a block node as the destination
// if this is a known struct type.
if (asgType == TYP_STRUCT)
{
dest = gtNewObjNode(structHnd, destAddr);
gtSetObjGcInfo(dest->AsObj());
// Although an obj as a call argument was always assumed to be a globRef
// (which is itself overly conservative), that is not true of the operands
// of a block assignment.
dest->gtFlags &= ~GTF_GLOB_REF;
dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
}
else if (varTypeIsStruct(asgType))
{
dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
}
else
{
dest = gtNewOperNode(GT_IND, asgType, destAddr);
}
}
else
{
dest->gtType = asgType;
}
dest->gtFlags |= destFlags;
destFlags = dest->gtFlags;
// return an assignment node, to be appended
GenTree* asgNode = gtNewAssignNode(dest, src);
gtBlockOpInit(asgNode, dest, src, false);
// TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
// of assignments.
if ((destFlags & GTF_DONT_CSE) == 0)
{
dest->gtFlags &= ~(GTF_DONT_CSE);
}
return asgNode;
}
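// Rough shape of the GT_MKREFANY case above (simplified): for src = MKREFANY(ptr, type)
// the code appends
//
//     GT_ASG(GT_IND(destAddr), ptr)                              // data-pointer slot
//
// and returns
//
//     GT_ASG(GT_IND(GT_ADD(destAddrClone, typeOffset)), type)    // type slot
//
// leaving it to the caller to append that second assignment.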
/*****************************************************************************
Given a struct value, and the class handle for that structure, return
the expression for the address for that structure value.
willDeref - whether the caller guarantees to dereference the pointer.
*/
GenTree* Compiler::impGetStructAddr(GenTree* structVal,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
bool willDeref)
{
assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
var_types type = structVal->TypeGet();
genTreeOps oper = structVal->gtOper;
if (oper == GT_OBJ && willDeref)
{
assert(structVal->gtObj.gtClass == structHnd);
return (structVal->gtObj.Addr());
}
else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY ||
structVal->OperIsSimdHWIntrinsic())
{
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
// The 'return value' is now the temp itself
type = genActualType(lvaTable[tmpNum].TypeGet());
GenTree* temp = gtNewLclvNode(tmpNum, type);
temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
return temp;
}
else if (oper == GT_COMMA)
{
assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
GenTree* oldTreeLast = impTreeLast;
structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
structVal->gtType = TYP_BYREF;
if (oldTreeLast != impTreeLast)
{
// Some temp assignment statement was placed on the statement list
// for Op2, but that would be out of order with op1, so we need to
// spill op1 onto the statement list after whatever was last
// before we recursed on Op2 (i.e. before whatever Op2 appended).
impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
structVal->gtOp.gtOp1 = gtNewNothingNode();
}
return (structVal);
}
return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
}
//------------------------------------------------------------------------
// impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
// and optionally determine the GC layout of the struct.
//
// Arguments:
// structHnd - The class handle for the struct type of interest.
// gcLayout - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
// into which the gcLayout will be written.
// pNumGCVars - (optional, default nullptr) - if non-null, a pointer to an unsigned,
// which will be set to the number of GC fields in the struct.
// pSimdBaseType - (optional, default nullptr) - if non-null, and the struct is a SIMD
// type, set to the SIMD base type
//
// Return Value:
// The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
// The gcLayout will be returned using the pointers provided by the caller, if non-null.
// It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
//
// Assumptions:
// The caller must set gcLayout to nullptr OR ensure that it is large enough
// (see ICorStaticInfo::getClassGClayout in corinfo.h).
//
// Notes:
// Normalizing the type involves examining the struct type to determine if it should
// be modified to one that is handled specially by the JIT, possibly being a candidate
// for full enregistration, e.g. TYP_SIMD16.
var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
BYTE* gcLayout,
unsigned* pNumGCVars,
var_types* pSimdBaseType)
{
assert(structHnd != NO_CLASS_HANDLE);
const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
var_types structType = TYP_STRUCT;
// On CoreCLR the check for GC includes a "may" to account for the special
// ByRef-like span structs. The added check for "CONTAINS_STACK_PTR" is the particular bit.
// When this is set the struct will contain a ByRef that could be a GC pointer or a native
// pointer.
const bool mayContainGCPtrs =
((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
#ifdef FEATURE_SIMD
// Check to see if this is a SIMD type.
if (featureSIMD && !mayContainGCPtrs)
{
unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
{
unsigned int sizeBytes;
var_types simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
if (simdBaseType != TYP_UNKNOWN)
{
assert(sizeBytes == originalSize);
structType = getSIMDTypeForSize(sizeBytes);
if (pSimdBaseType != nullptr)
{
*pSimdBaseType = simdBaseType;
}
// Also indicate that we use floating point registers.
compFloatingPointUsed = true;
}
}
}
#endif // FEATURE_SIMD
// Fetch GC layout info if requested
if (gcLayout != nullptr)
{
unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
// Verify that the quick test up above via the class attributes gave a
// safe view of the type's GCness.
//
// Note there are cases where mayContainGCPtrs is true but getClassGClayout
// does not report any gc fields.
assert(mayContainGCPtrs || (numGCVars == 0));
if (pNumGCVars != nullptr)
{
*pNumGCVars = numGCVars;
}
}
else
{
// Can't safely ask for number of GC pointers without also
// asking for layout.
assert(pNumGCVars == nullptr);
}
return structType;
}
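// Example (illustrative; 'vector4Hnd' and 'gcStructHnd' are hypothetical class handles):
//
//     var_types t1 = impNormStructType(vector4Hnd); // 16-byte SIMD-able struct -> TYP_SIMD16
//                                                   // (also sets compFloatingPointUsed)
//     var_types t2 = impNormStructType(gcStructHnd); // struct with GC fields stays TYP_STRUCT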
//****************************************************************************
// Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
// it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
//
GenTree* Compiler::impNormStructVal(GenTree* structVal,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
bool forceNormalization /*=false*/)
{
assert(forceNormalization || varTypeIsStruct(structVal));
assert(structHnd != NO_CLASS_HANDLE);
var_types structType = structVal->TypeGet();
bool makeTemp = false;
if (structType == TYP_STRUCT)
{
structType = impNormStructType(structHnd);
}
bool alreadyNormalized = false;
GenTreeLclVarCommon* structLcl = nullptr;
genTreeOps oper = structVal->OperGet();
switch (oper)
{
// GT_RETURN and GT_MKREFANY don't capture the handle.
case GT_RETURN:
break;
case GT_MKREFANY:
alreadyNormalized = true;
break;
case GT_CALL:
structVal->gtCall.gtRetClsHnd = structHnd;
makeTemp = true;
break;
case GT_RET_EXPR:
structVal->gtRetExpr.gtRetClsHnd = structHnd;
makeTemp = true;
break;
case GT_ARGPLACE:
structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
break;
case GT_INDEX:
// This will be transformed to an OBJ later.
alreadyNormalized = true;
structVal->gtIndex.gtStructElemClass = structHnd;
structVal->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(structHnd);
break;
case GT_FIELD:
// Wrap it in a GT_OBJ.
structVal->gtType = structType;
structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
break;
case GT_LCL_VAR:
case GT_LCL_FLD:
structLcl = structVal->AsLclVarCommon();
// Wrap it in a GT_OBJ.
structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
__fallthrough;
case GT_OBJ:
case GT_BLK:
case GT_DYN_BLK:
case GT_ASG:
// These should already have the appropriate type.
assert(structVal->gtType == structType);
alreadyNormalized = true;
break;
case GT_IND:
assert(structVal->gtType == structType);
structVal = gtNewObjNode(structHnd, structVal->gtGetOp1());
alreadyNormalized = true;
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWIntrinsic:
assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
break;
#endif
case GT_COMMA:
{
// The second thing could either be a block node or a GT_FIELD or a GT_SIMD or a GT_COMMA node.
GenTree* blockNode = structVal->gtOp.gtOp2;
assert(blockNode->gtType == structType);
// Is this GT_COMMA(op1, GT_COMMA())?
GenTree* parent = structVal;
if (blockNode->OperGet() == GT_COMMA)
{
// Find the last node in the comma chain.
do
{
assert(blockNode->gtType == structType);
parent = blockNode;
blockNode = blockNode->gtOp.gtOp2;
} while (blockNode->OperGet() == GT_COMMA);
}
if (blockNode->OperGet() == GT_FIELD)
{
// If we have a GT_FIELD then wrap it in a GT_OBJ.
blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
}
#ifdef FEATURE_SIMD
if (blockNode->OperIsSIMDorSimdHWintrinsic())
{
parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
alreadyNormalized = true;
}
else
#endif
{
noway_assert(blockNode->OperIsBlk());
// Sink the GT_COMMA below the blockNode addr.
// That is, GT_COMMA(op1, op2=blockNode) is transformed into
// blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
//
// In case of a chained GT_COMMA case, we sink the last
// GT_COMMA below the blockNode addr.
GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
assert(blockNodeAddr->gtType == TYP_BYREF);
GenTree* commaNode = parent;
commaNode->gtType = TYP_BYREF;
commaNode->gtOp.gtOp2 = blockNodeAddr;
blockNode->gtOp.gtOp1 = commaNode;
if (parent == structVal)
{
structVal = blockNode;
}
alreadyNormalized = true;
}
}
break;
default:
noway_assert(!"Unexpected node in impNormStructVal()");
break;
}
structVal->gtType = structType;
GenTree* structObj = structVal;
if (!alreadyNormalized || forceNormalization)
{
if (makeTemp)
{
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
// The structVal is now the temp itself
structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
// TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
}
else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
{
// Wrap it in a GT_OBJ
structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
}
}
if (structLcl != nullptr)
{
// An OBJ on an ADDR(LCL_VAR) can never raise an exception
// so we don't set GTF_EXCEPT here.
if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
{
structObj->gtFlags &= ~GTF_GLOB_REF;
}
}
else
{
// In general an OBJ is an indirection and could raise an exception.
structObj->gtFlags |= GTF_EXCEPT;
}
return (structObj);
}
/******************************************************************************/
// Given a type token, generate code that will evaluate to the correct
// handle representation of that token (type handle, field handle, or method handle)
//
// For most cases, the handle is determined at compile-time, and the code
// generated is simply an embedded handle.
//
// Run-time lookup is required if the enclosing method is shared between instantiations
// and the token refers to formal type parameters whose instantiation is not known
// at compile-time.
//
GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
BOOL* pRuntimeLookup /* = NULL */,
BOOL mustRestoreHandle /* = FALSE */,
BOOL importParent /* = FALSE */)
{
assert(!fgGlobalMorph);
CORINFO_GENERICHANDLE_RESULT embedInfo;
info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
if (pRuntimeLookup)
{
*pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
}
if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
{
switch (embedInfo.handleType)
{
case CORINFO_HANDLETYPE_CLASS:
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
break;
case CORINFO_HANDLETYPE_METHOD:
info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
break;
case CORINFO_HANDLETYPE_FIELD:
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
break;
default:
break;
}
}
// Generate the full lookup tree. May be null if we're abandoning an inline attempt.
GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
embedInfo.compileTimeHandle);
// If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
{
result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
}
return result;
}
GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
unsigned handleFlags,
void* compileTimeHandle)
{
if (!pLookup->lookupKind.needsRuntimeLookup)
{
// No runtime lookup is required.
// Access is direct or memory-indirect (of a fixed address) reference
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE);
if (pLookup->constLookup.accessType == IAT_VALUE)
{
handle = pLookup->constLookup.handle;
}
else if (pLookup->constLookup.accessType == IAT_PVALUE)
{
pIndirection = pLookup->constLookup.addr;
}
return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
}
else if (compIsForInlining())
{
// Don't import runtime lookups when inlining
// Inlining has to be aborted in such a case
compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
return nullptr;
}
else
{
// Need to use dictionary-based access which depends on the typeContext
// which is only available at runtime, not at compile-time.
return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
}
}
#ifdef FEATURE_READYTORUN_COMPILER
GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
unsigned handleFlags,
void* compileTimeHandle)
{
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE);
if (pLookup->accessType == IAT_VALUE)
{
handle = pLookup->handle;
}
else if (pLookup->accessType == IAT_PVALUE)
{
pIndirection = pLookup->addr;
}
return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
}
GenTreeCall* Compiler::impReadyToRunHelperToTree(
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CorInfoHelpFunc helper,
var_types type,
GenTreeArgList* args /* =NULL*/,
CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. Only used with generics */)
{
CORINFO_CONST_LOOKUP lookup;
if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
{
return nullptr;
}
GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args);
op1->setEntryPoint(lookup);
return op1;
}
#endif
GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
GenTree* op1 = nullptr;
switch (pCallInfo->kind)
{
case CORINFO_CALL:
op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
{
op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
}
else
{
op1->gtFptrVal.gtEntryPoint.addr = nullptr;
op1->gtFptrVal.gtEntryPoint.accessType = IAT_VALUE;
}
#endif
break;
case CORINFO_CALL_CODE_POINTER:
if (compIsForInlining())
{
// Don't import runtime lookups when inlining
// Inlining has to be aborted in such a case
compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
return nullptr;
}
op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
break;
default:
noway_assert(!"unknown call kind");
break;
}
return op1;
}
//------------------------------------------------------------------------
// getRuntimeContextTree: find pointer to context for runtime lookup.
//
// Arguments:
// kind - lookup kind.
//
// Return Value:
// Return GenTree pointer to generic shared context.
//
// Notes:
// Reports the use of the generic context.
GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
{
GenTree* ctxTree = nullptr;
// Collectible types require that, for shared generic code, if we use the generic context
// parameter we report it. (This is a conservative approach; we could detect some cases,
// particularly when the context parameter is 'this', where we don't need the eager reporting logic.)
lvaGenericsContextUseCount++;
if (kind == CORINFO_LOOKUP_THISOBJ)
{
// this Object
ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
// Vtable pointer of this object
ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
ctxTree->gtFlags |= GTF_IND_INVARIANT;
}
else
{
assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
}
return ctxTree;
}
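// Illustrative result shapes (simplified, not exact IR dumps): for CORINFO_LOOKUP_THISOBJ
// the returned tree is roughly GT_IND(GT_LCL_VAR<this>), i.e. the method table pointer of
// 'this'; for CORINFO_LOOKUP_METHODPARAM / CORINFO_LOOKUP_CLASSPARAM it is simply
// GT_LCL_VAR<info.compTypeCtxtArg>.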
/*****************************************************************************/
/* Import a dictionary lookup to access a handle in code shared between
generic instantiations.
The lookup depends on the typeContext which is only available at
runtime, and not at compile-time.
pLookup->token1 and pLookup->token2 specify the handle that is needed.
The cases are:
1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
instantiation-specific handle, and the tokens to lookup the handle.
2. pLookup->indirections != CORINFO_USEHELPER :
2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
to get the handle.
2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
If it is non-NULL, it is the handle required. Else, call a helper
to lookup the handle.
*/
GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
void* compileTimeHandle)
{
// This method can only be called from the importer instance of the Compiler.
// In other words, it cannot be called on the Compiler instance created for the inlinee.
assert(!compIsForInlining());
GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
// It's available only via the run-time helper function
if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
{
#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
{
return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
gtNewArgList(ctxTree), &pLookup->lookupKind);
}
#endif
GenTree* argNode =
gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
}
// Slot pointer
GenTree* slotPtrTree = ctxTree;
if (pRuntimeLookup->testForNull)
{
slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("impRuntimeLookup slot"));
}
GenTree* indOffTree = nullptr;
// Apply repeated indirections
for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
{
if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
{
indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
}
if (i != 0)
{
slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
}
if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
{
slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
}
if (pRuntimeLookup->offsets[i] != 0)
{
slotPtrTree =
gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
}
}
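// For example (illustrative only): with indirections == 2, no indirect offsets, and both
// offsets non-zero, the loop above leaves slotPtrTree as
//     ADD(IND(ADD(ctx, offsets[0])), offsets[1])
// i.e. the address of the final slot; the dereference that yields the handle happens below.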
// No null test required
if (!pRuntimeLookup->testForNull)
{
if (pRuntimeLookup->indirections == 0)
{
return slotPtrTree;
}
slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
if (!pRuntimeLookup->testForFixup)
{
return slotPtrTree;
}
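// testForFixup: the slot may hold a fixup cell rather than the final handle. The code below
// builds, in effect (illustrative sketch):
//     slot = *slotPtr;
//     if (slot & 1) slot = *(slot - 1);   // low bit set => indirect through the fixup cell
//     use slot;
// expressed as a QMARK/COLON statement over a temp.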
impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtOffs);
GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
// downcast the pointer to a TYP_INT on 64-bit targets
slot = impImplicitIorI4Cast(slot, TYP_INT);
// Use a GT_AND to check for the lowest bit and indirect if it is set
GenTree* test = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));
// slot = GT_IND(slot - 1)
slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
GenTree* add = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
indir->gtFlags |= GTF_IND_NONFAULTING;
indir->gtFlags |= GTF_IND_INVARIANT;
slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
GenTree* asg = gtNewAssignNode(slot, indir);
GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
GenTree* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
}
assert(pRuntimeLookup->indirections != 0);
impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
// Extract the handle
GenTree* handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
handle->gtFlags |= GTF_IND_NONFAULTING;
GenTree* handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("impRuntimeLookup typehandle"));
// Call to helper
GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
GenTree* helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
// Check for null and possibly call helper
GenTree* relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
gtNewNothingNode(), // do nothing if nonnull
helperCall);
GenTree* qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
unsigned tmp;
if (handleCopy->IsLocal())
{
tmp = handleCopy->gtLclVarCommon.gtLclNum;
}
else
{
tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
}
impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
return gtNewLclvNode(tmp, TYP_I_IMPL);
}
/******************************************************************************
* Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
* If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
* else, grab a new temp.
* For structs (which can be pushed on the stack using obj, etc),
* special handling is needed
*/
struct RecursiveGuard
{
public:
RecursiveGuard()
{
m_pAddress = nullptr;
}
~RecursiveGuard()
{
if (m_pAddress)
{
*m_pAddress = false;
}
}
void Init(bool* pAddress, bool bInitialize)
{
assert(pAddress && *pAddress == false && "Recursive guard violation");
m_pAddress = pAddress;
if (bInitialize)
{
*m_pAddress = true;
}
}
protected:
bool* m_pAddress;
};
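// Illustrative use of RecursiveGuard (see impSpillStackEntry below): the guard asserts that
// the flag is not already set, optionally sets it for the lifetime of the enclosing scope,
// and clears it again in its destructor:
//
//     RecursiveGuard guard;
//     guard.Init(&impNestedStackSpill, bAssertOnRecursion);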
bool Compiler::impSpillStackEntry(unsigned level,
unsigned tnum
#ifdef DEBUG
,
bool bAssertOnRecursion,
const char* reason
#endif
)
{
#ifdef DEBUG
RecursiveGuard guard;
guard.Init(&impNestedStackSpill, bAssertOnRecursion);
#endif
GenTree* tree = verCurrentState.esStack[level].val;
/* Allocate a temp if we haven't been asked to use a particular one */
if (tiVerificationNeeded)
{
// Ignore bad temp requests (they will happen with bad code and will be
// caught when importing the destblock)
if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
{
return false;
}
}
else
{
if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
{
return false;
}
}
bool isNewTemp = false;
if (tnum == BAD_VAR_NUM)
{
tnum = lvaGrabTemp(true DEBUGARG(reason));
isNewTemp = true;
}
else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
{
// If verification is needed and tnum's type is incompatible with
// the type on the stack, we grab a new temp. This is safe since
// we will throw a verification exception in the dest block.
var_types valTyp = tree->TypeGet();
var_types dstTyp = lvaTable[tnum].TypeGet();
// If the two types are different, we return. This will only happen with bad code and will
// be caught when importing the destblock. We still allow int/byref and float/double differences.
if ((genActualType(valTyp) != genActualType(dstTyp)) &&
!(
#ifndef _TARGET_64BIT_
(valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
#endif // !_TARGET_64BIT_
(varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
{
if (verNeedsVerification())
{
return false;
}
}
}
/* Assign the spilled entry to the temp */
impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
// If temp is newly introduced and a ref type, grab what type info we can.
if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
{
CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
lvaSetClass(tnum, tree, stkHnd);
}
// The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
var_types type = genActualType(lvaTable[tnum].TypeGet());
GenTree* temp = gtNewLclvNode(tnum, type);
verCurrentState.esStack[level].val = temp;
return true;
}
/*****************************************************************************
*
* Ensure that the stack has only spilled values
*/
void Compiler::impSpillStackEnsure(bool spillLeaves)
{
assert(!spillLeaves || opts.compDbgCode);
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
GenTree* tree = verCurrentState.esStack[level].val;
if (!spillLeaves && tree->OperIsLeaf())
{
continue;
}
// Temps introduced by the importer itself don't need to be spilled
bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
if (isTempLcl)
{
continue;
}
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
}
}
void Compiler::impSpillEvalStack()
{
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
}
}
/*****************************************************************************
*
* If the stack contains any trees with side effects in them, assign those
* trees to temps and append the assignments to the statement list.
* On return the stack is guaranteed to be empty.
*/
inline void Compiler::impEvalSideEffects()
{
impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
verCurrentState.esStackDepth = 0;
}
/*****************************************************************************
*
* If the stack contains any trees with side effects in them, assign those
* trees to temps and replace them on the stack with refs to their temps.
* [0..chkLevel) is the portion of the stack which will be checked and spilled.
*/
inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
{
assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
/* Before we make any appends to the tree list we must spill the
* "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
impSpillSpecialSideEff();
if (chkLevel == (unsigned)CHECK_SPILL_ALL)
{
chkLevel = verCurrentState.esStackDepth;
}
assert(chkLevel <= verCurrentState.esStackDepth);
unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
for (unsigned i = 0; i < chkLevel; i++)
{
GenTree* tree = verCurrentState.esStack[i].val;
GenTree* lclVarTree;
if ((tree->gtFlags & spillFlags) != 0 ||
(spillGlobEffects && // Only consider the following when spillGlobEffects == TRUE
!impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
gtHasLocalsWithAddrOp(tree))) // Spill if we still see a GT_LCL_VAR that has the lvHasLdAddrOp or
// lvAddrTaken flag set.
{
impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
}
}
}
/*****************************************************************************
*
* If the stack contains any trees with special side effects in them, assign
* those trees to temps and replace them on the stack with refs to their temps.
*/
inline void Compiler::impSpillSpecialSideEff()
{
// Only exception objects need to be carefully handled
if (!compCurBB->bbCatchTyp)
{
return;
}
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
GenTree* tree = verCurrentState.esStack[level].val;
// If the sub-tree contains the exception object (GT_CATCH_ARG), spill this stack entry.
if (gtHasCatchArg(tree))
{
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
}
}
}
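// Rationale (illustrative): GT_CATCH_ARG represents the incoming exception object, which is
// tied to a fixed location on entry to the handler (see impPushCatchArgOnStack). Any stack
// tree embedding it must be spilled before new statements are appended, so that the catch
// arg is still evaluated first.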
/*****************************************************************************
*
* Spill all stack references to value classes (TYP_STRUCT nodes)
*/
void Compiler::impSpillValueClasses()
{
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
GenTree* tree = verCurrentState.esStack[level].val;
if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
{
// Tree walk was aborted, which means that we found a
// value class on the stack. Need to spill that
// stack entry.
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
}
}
}
/*****************************************************************************
*
* Callback that checks if a tree node is TYP_STRUCT
*/
Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data)
{
fgWalkResult walkResult = WALK_CONTINUE;
if ((*pTree)->gtType == TYP_STRUCT)
{
// Abort the walk and indicate that we found a value class
walkResult = WALK_ABORT;
}
return walkResult;
}
/*****************************************************************************
*
* If the stack contains any trees with references to local #lclNum, assign
* those trees to temps and replace them on the stack with refs to
* their temps.
*/
void Compiler::impSpillLclRefs(ssize_t lclNum)
{
/* Before we make any appends to the tree list we must spill the
* "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
impSpillSpecialSideEff();
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
GenTree* tree = verCurrentState.esStack[level].val;
/* If the tree may throw an exception, and the block has a handler,
then we need to spill assignments to the local if the local is
live on entry to the handler.
Just spill 'em all without considering the liveness */
bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
/* Skip the tree if it doesn't have an affected reference,
unless xcptnCaught */
if (xcptnCaught || gtHasRef(tree, lclNum, false))
{
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
}
}
}
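// Typical caller scenario (illustrative): when importing a store to local V00 while a tree
// that still reads V00 is sitting on the stack, that tree must be spilled first so it
// observes the value of V00 from before the store.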
/*****************************************************************************
*
* Push catch arg onto the stack.
* If there are jumps to the beginning of the handler, insert a basic block
* and spill the catch arg to a temp. Update the handler block if necessary.
*
* Returns the basic block of the actual handler.
*/
BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
{
// Do not inject the basic block twice on reimport. This should be
// hit only under JIT stress. See if the block is the one we injected.
// Note that EH canonicalization can inject internal blocks here. We might
// be able to re-use such a block (but we don't, right now).
if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
(BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
{
GenTree* tree = hndBlk->bbTreeList;
if (tree != nullptr && tree->gtOper == GT_STMT)
{
tree = tree->gtStmt.gtStmtExpr;
assert(tree != nullptr);
if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
(tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
{
tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
return hndBlk->bbNext;
}
}
// If we get here, it must have been some other kind of internal block. It's possible that
// someone prepended something to our injected block, but that's unlikely.
}
/* Push the exception address value on the stack */
GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
/* Mark the node as having a side-effect - i.e. cannot be
* moved around since it is tied to a fixed location (EAX) */
arg->gtFlags |= GTF_ORDER_SIDEEFF;
#if defined(JIT32_GCENCODER)
const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
#else
const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5);
#endif // defined(JIT32_GCENCODER)
/* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
{
if (hndBlk->bbRefs == 1)
{
hndBlk->bbRefs++;
}
/* Create extra basic block for the spill */
BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
newBlk->setBBWeight(hndBlk->bbWeight);
newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
/* Account for the new link we are about to create */
hndBlk->bbRefs++;
/* Spill into a temp */
unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
lvaTable[tempNum].lvType = TYP_REF;
arg = gtNewTempAssign(tempNum, arg);
hndBlk->bbStkTempsIn = tempNum;
/* Report the debug info. impImportBlockCode won't treat
* the actual handler as an exception block and thus won't do it for us. */
if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
{
impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
arg = gtNewStmt(arg, impCurStmtOffs);
}
fgInsertStmtAtEnd(newBlk, arg);
arg = gtNewLclvNode(tempNum, TYP_REF);
}
impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
return hndBlk;
}
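// Illustrative sketch of the result when a new block is inserted (hndBlk->bbRefs > 1, or the
// stress/filter cases above):
//
//     newBlk (BBJ_NONE, injected before hndBlk):   tempNum = CATCH_ARG
//     hndBlk (the original handler):               stack on entry = { LCL_VAR tempNum }
//
// Otherwise the CATCH_ARG node itself is pushed directly on the stack for hndBlk.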
/*****************************************************************************
*
* Given a tree, clone it. *pClone is set to the cloned tree.
* Returns the original tree if the cloning was easy,
* else returns a use of the temp to which the tree was spilled.
* If the tree has side-effects, it will be spilled to a temp.
*/
GenTree* Compiler::impCloneExpr(GenTree* tree,
GenTree** pClone,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
GenTree** pAfterStmt DEBUGARG(const char* reason))
{
if (!(tree->gtFlags & GTF_GLOB_EFFECT))
{
GenTree* clone = gtClone(tree, true);
if (clone)
{
*pClone = clone;
return tree;
}
}
/* Store the operand in a temp and return the temp */
unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
// impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
// return a struct type. It also may modify the struct type to a more
// specialized type (e.g. a SIMD type). So we will get the type from
// the lclVar AFTER calling impAssignTempGen().
impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
var_types type = genActualType(lvaTable[temp].TypeGet());
*pClone = gtNewLclvNode(temp, type);
return gtNewLclvNode(temp, type);
}
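// Typical usage (illustrative): obtain two uses of a value without evaluating its side
// effects twice:
//
//     GenTree* use2;
//     GenTree* use1 = impCloneExpr(val, &use2, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
//                                  nullptr DEBUGARG("two uses of val"));
//
// 'use1' is either the original tree or a use of the spill temp; 'use2' is always safe to
// evaluate again.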
/*****************************************************************************
* Remember the IL offset (including stack-empty info) for the trees we will
* generate now.
*/
inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
{
if (compIsForInlining())
{
GenTree* callStmt = impInlineInfo->iciStmt;
assert(callStmt->gtOper == GT_STMT);
impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
}
else
{
assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
impCurStmtOffs = offs | stkBit;
}
}
/*****************************************************************************
* Returns current IL offset with stack-empty and call-instruction info incorporated
*/
inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
{
if (compIsForInlining())
{
return BAD_IL_OFFSET;
}
else
{
assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
return offs | stkBit | callInstructionBit;
}
}
//------------------------------------------------------------------------
// impCanSpillNow: check whether it is currently safe to spill all values from the eval stack to locals.
//
// Arguments:
// prevOpcode - the last opcode that was imported
//
// Return Value:
// true if spilling is legal; false if this could be part of a sequence that we do not want to split.
bool Compiler::impCanSpillNow(OPCODE prevOpcode)
{
// Don't spill after ldtoken, newarr and newobj, because it could be a part of the InitializeArray sequence.
// Avoid breaking up to guarantee that impInitializeArrayIntrinsic can succeed.
return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ);
}
/*****************************************************************************
*
* Remember the instr offset for the statements
*
* When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
* impCurOpcOffs, if the append was done because of a partial stack spill,
* as some of the trees corresponding to code up to impCurOpcOffs might
* still be sitting on the stack.
* So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
* This should be called when an opcode finally/explicitly causes
* impAppendTree(tree) to be called (as opposed to being called because of
* a spill caused by the opcode)
*/
#ifdef DEBUG
void Compiler::impNoteLastILoffs()
{
if (impLastILoffsStmt == nullptr)
{
// We should have added a statement for the current basic block
// Is this assert correct ?
assert(impTreeLast);
assert(impTreeLast->gtOper == GT_STMT);
impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
}
else
{
impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
impLastILoffsStmt = nullptr;
}
}
#endif // DEBUG
/*****************************************************************************
* We don't create any GenTree (excluding spills) for a branch.
* For debugging info, we need a placeholder so that we can note
* the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
*/
void Compiler::impNoteBranchOffs()
{
if (opts.compDbgCode)
{
impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
}
}
/*****************************************************************************
* Locate the next stmt boundary for which we need to record info.
* We will have to spill the stack at such boundaries if it is not
* already empty.
* Returns the next stmt boundary (after the start of the block)
*/
unsigned Compiler::impInitBlockLineInfo()
{
/* Assume the block does not correspond with any IL offset. This prevents
us from reporting extra offsets. Extra mappings can cause confusing
stepping, especially if the extra mapping is a jump-target, and the
debugger does not ignore extra mappings, but instead rewinds to the
nearest known offset */
impCurStmtOffsSet(BAD_IL_OFFSET);
if (compIsForInlining())
{
return ~0;
}
IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
{
impCurStmtOffsSet(blockOffs);
}
if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
{
impCurStmtOffsSet(blockOffs);
}
/* Always report IL offset 0 or some tests get confused.
Probably a good idea anyway. */
if (blockOffs == 0)
{
impCurStmtOffsSet(blockOffs);
}
if (!info.compStmtOffsetsCount)
{
return ~0;
}
/* Find the lowest explicit stmt boundary within the block */
/* Start looking at an entry that is based on our instr offset */
unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
if (index >= info.compStmtOffsetsCount)
{
index = info.compStmtOffsetsCount - 1;
}
/* If we've guessed too far, back up */
while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
{
index--;
}
/* If we guessed short, advance ahead */
while (info.compStmtOffsets[index] < blockOffs)
{
index++;
if (index == info.compStmtOffsetsCount)
{
return info.compStmtOffsetsCount;
}
}
assert(index < info.compStmtOffsetsCount);
if (info.compStmtOffsets[index] == blockOffs)
{
/* There is an explicit boundary for the start of this basic block.
So we will start with bbCodeOffs. Else we will wait until we
get to the next explicit boundary */
impCurStmtOffsSet(blockOffs);
index++;
}
return index;
}
/*****************************************************************************/
static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
{
switch (opcode)
{
case CEE_CALL:
case CEE_CALLI:
case CEE_CALLVIRT:
return true;
default:
return false;
}
}
/*****************************************************************************/
static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
{
switch (opcode)
{
case CEE_CALL:
case CEE_CALLI:
case CEE_CALLVIRT:
case CEE_JMP:
case CEE_NEWOBJ:
case CEE_NEWARR:
return true;
default:
return false;
}
}
/*****************************************************************************/
// One might think it is worth caching these values, but results indicate
// that it isn't.
// In addition, caching them causes SuperPMI to be unable to completely
// encapsulate an individual method context.
CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
{
CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
return refAnyClass;
}
CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
{
CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
return typeHandleClass;
}
CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
{
CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
return argIteratorClass;
}
CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
{
CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
return stringClass;
}
CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
{
CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
return objectClass;
}
/*****************************************************************************
* "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
* set its type to TYP_BYREF when we create it. Whether it can be
* changed to TYP_I_IMPL is only known at the point where we use it.
*/
/* static */
void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2)
{
if (tree1->IsVarAddr())
{
tree1->gtType = TYP_I_IMPL;
}
if (tree2 && tree2->IsVarAddr())
{
tree2->gtType = TYP_I_IMPL;
}
}
/*****************************************************************************
* TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
* to make that an explicit cast in our trees, so any implicit casts that
* exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
* turned into explicit casts here.
* We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
*/
GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp)
{
var_types currType = genActualType(tree->gtType);
var_types wantedType = genActualType(dstTyp);
if (wantedType != currType)
{
// Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
{
if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
{
tree->gtType = TYP_I_IMPL;
}
}
#ifdef _TARGET_64BIT_
else if (varTypeIsI(wantedType) && (currType == TYP_INT))
{
// Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
}
else if ((wantedType == TYP_INT) && varTypeIsI(currType))
{
// Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT);
}
#endif // _TARGET_64BIT_
}
return tree;
}
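// Example (illustrative, 64-bit target): adding a 32-bit index to a native-sized pointer
// requires an explicit widening in the IR, so
//     ADD(TYP_BYREF, ptr, index)    becomes    ADD(TYP_BYREF, ptr, CAST<long>(index))
// after the index is passed through impImplicitIorI4Cast(index, TYP_I_IMPL).
// A 'ldnull' constant (TYP_REF, value 0) is simply retyped to TYP_I_IMPL.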
/*****************************************************************************
* TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
* but we want to make that an explicit cast in our trees, so any implicit casts
* that exist in the IL are turned into explicit casts here.
*/
GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp)
{
if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
{
tree = gtNewCastNode(dstTyp, tree, false, dstTyp);
}
return tree;
}
//------------------------------------------------------------------------
// impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
// with a GT_COPYBLK node.
//
// Arguments:
// sig - The InitializeArray signature.
//
// Return Value:
// A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
// nullptr otherwise.
//
// Notes:
// The function recognizes the following IL pattern:
// ldc <length> or a list of ldc <lower bound>/<length>
// newarr or newobj
// dup
// ldtoken <field handle>
// call InitializeArray
// The lower bounds need not be constant except when the array rank is 1.
// The function recognizes all kinds of arrays thus enabling a small runtime
// such as CoreRT to skip providing an implementation for InitializeArray.
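// For reference (illustrative; field names elided), an initializer such as
//     static readonly int[] Primes = { 2, 3, 5, 7 };
// typically compiles to IL of the shape recognized here:
//     ldc.i4.4
//     newarr   System.Int32
//     dup
//     ldtoken  field valuetype '<PrivateImplementationDetails>'/'...'::'...'
//     call     void System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(
//                  class System.Array, valuetype System.RuntimeFieldHandle)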
GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
{
assert(sig->numArgs == 2);
GenTree* fieldTokenNode = impStackTop(0).val;
GenTree* arrayLocalNode = impStackTop(1).val;
//
// Verify that the field token is known and valid. Note that it's also
// possible for the token to come from reflection, in which case we cannot do
// the optimization and must therefore revert to calling the helper. You can
// see an example of this in bvt\DynIL\initarray2.exe (in Main).
//
// Check to see if the ldtoken helper call is what we see here.
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
(fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
{
return nullptr;
}
// Strip helper call away
fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
if (fieldTokenNode->gtOper == GT_IND)
{
fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
}
// Check for constant
if (fieldTokenNode->gtOper != GT_CNS_INT)
{
return nullptr;
}
CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
{
return nullptr;
}
//
// We need to get the number of elements in the array and the size of each element.
// We verify that the newarr statement is exactly what we expect it to be.
// If it's not then we just return NULL and we don't optimize this call
//
//
// It is possible that we don't have any statements in the block yet.
//
if (impTreeLast->gtOper != GT_STMT)
{
assert(impTreeLast->gtOper == GT_BEG_STMTS);
return nullptr;
}
//
// We start by looking at the last statement, making sure it's an assignment, and
// that the target of the assignment is the array passed to InitializeArray.
//
GenTree* arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
(arrayLocalNode->gtOper != GT_LCL_VAR) ||
(arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
{
return nullptr;
}
//
// Make sure that the object being assigned is a helper call.
//
GenTree* newArrayCall = arrayAssignment->gtOp.gtOp2;
if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
{
return nullptr;
}
//
// Verify that it is one of the new array helpers.
//
bool isMDArray = false;
if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
#ifdef FEATURE_READYTORUN_COMPILER
&& newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_R2R_DIRECT) &&
newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
#endif
)
{
if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
{
return nullptr;
}
isMDArray = true;
}
CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
//
// Make sure we found a compile time handle to the array
//
if (!arrayClsHnd)
{
return nullptr;
}
unsigned rank = 0;
S_UINT32 numElements;
if (isMDArray)
{
rank = info.compCompHnd->getArrayRank(arrayClsHnd);
if (rank == 0)
{
return nullptr;
}
GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
assert(tokenArg != nullptr);
GenTreeArgList* numArgsArg = tokenArg->Rest();
assert(numArgsArg != nullptr);
GenTreeArgList* argsArg = numArgsArg->Rest();
assert(argsArg != nullptr);
//
// The number of arguments should be a constant between 1 and 64. The rank can't be 0
// so at least one length must be present and the rank can't exceed 32 so there can
// be at most 64 arguments - 32 lengths and 32 lower bounds.
//
if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
(numArgsArg->Current()->AsIntCon()->IconValue() > 64))
{
return nullptr;
}
unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
bool lowerBoundsSpecified;
if (numArgs == rank * 2)
{
lowerBoundsSpecified = true;
}
else if (numArgs == rank)
{
lowerBoundsSpecified = false;
//
// If the rank is 1 and a lower bound isn't specified then the runtime creates
// a SDArray. Note that even if a lower bound is specified it can be 0 and then
// we get a SDArray as well, see the for loop below.
//
if (rank == 1)
{
isMDArray = false;
}
}
else
{
return nullptr;
}
//
// The rank is known to be at least 1 so we can start with numElements being 1
// to avoid the need to special case the first dimension.
//
numElements = S_UINT32(1);
struct Match
{
static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
{
return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
}
static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
{
return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
(tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
}
static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
{
return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
(tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
}
static bool IsComma(GenTree* tree)
{
return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
}
};
unsigned argIndex = 0;
GenTree* comma;
for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
{
if (lowerBoundsSpecified)
{
//
// In general lower bounds can be ignored because they're not needed to
// calculate the total number of elements. But for single dimensional arrays
// we need to know if the lower bound is 0 because in this case the runtime
// creates a SDArray and this affects the way the array data offset is calculated.
//
if (rank == 1)
{
GenTree* lowerBoundAssign = comma->gtGetOp1();
assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
if (lowerBoundNode->IsIntegralConst(0))
{
isMDArray = false;
}
}
comma = comma->gtGetOp2();
argIndex++;
}
GenTree* lengthNodeAssign = comma->gtGetOp1();
assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
if (!lengthNode->IsCnsIntOrI())
{
return nullptr;
}
numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
argIndex++;
}
assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
if (argIndex != numArgs)
{
return nullptr;
}
}
else
{
//
// Make sure there are exactly two arguments: the array class and
// the number of elements.
//
GenTree* arrayLengthNode;
GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
#ifdef FEATURE_READYTORUN_COMPILER
if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
{
// Array length is 1st argument for readytorun helper
arrayLengthNode = args->Current();
}
else
#endif
{
// Array length is 2nd argument for regular helper
arrayLengthNode = args->Rest()->Current();
}
//
// Make sure that the number of elements looks valid.
//
if (arrayLengthNode->gtOper != GT_CNS_INT)
{
return nullptr;
}
numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
if (!info.compCompHnd->isSDArray(arrayClsHnd))
{
return nullptr;
}
}
CORINFO_CLASS_HANDLE elemClsHnd;
var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
//
// Note that genTypeSize will return zero for non-primitive types, which is exactly
// what we want (size will then be 0, and we will catch this in the conditional below).
// Note that we don't expect this to fail for valid binaries, so we assert in the
// non-verification case (the verification case should not assert but rather correctly
// handle bad binaries). This assert is not guarding any specific invariant, but rather
// saying that we don't expect this to happen, and if it is hit, we need to investigate
// why.
//
S_UINT32 elemSize(genTypeSize(elementType));
S_UINT32 size = elemSize * S_UINT32(numElements);
if (size.IsOverflow())
{
return nullptr;
}
if ((size.Value() == 0) || (varTypeIsGC(elementType)))
{
assert(verNeedsVerification());
return nullptr;
}
void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
if (!initData)
{
return nullptr;
}
//
// At this point we are ready to commit to implementing the InitializeArray
// intrinsic using a struct assignment. Pop the arguments from the stack and
// return the struct assignment node.
//
impPopStack();
impPopStack();
const unsigned blkSize = size.Value();
unsigned dataOffset;
if (isMDArray)
{
dataOffset = eeGetMDArrayDataOffset(elementType, rank);
}
else
{
dataOffset = eeGetArrayDataOffset(elementType);
}
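// Build the block copy (illustrative shape): copy 'blkSize' bytes from the static
// initialization data into the array's data section, i.e. roughly
//     copyBlk(arrayLocal + dataOffset, IND(staticDataHandle), blkSize)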
GenTree* dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
GenTree* blk = gtNewBlockVal(dst, blkSize);
GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_STATIC_HDL, false);
return gtNewBlkOpNode(blk, // dst
src, // src
blkSize, // size
false, // volatile
true); // copyBlock
}
//------------------------------------------------------------------------
// impIntrinsic: possibly expand intrinsic call into alternate IR sequence
//
// Arguments:
// newobjThis - for constructor calls, the tree for the newly allocated object
// clsHnd - handle for the intrinsic method's class
// method - handle for the intrinsic method
// sig - signature of the intrinsic method
// methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
// memberRef - the token for the intrinsic method
// readonlyCall - true if call has a readonly prefix
// tailCall - true if call is in tail position
// pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
// if call is not constrained
// constraintCallThisTransform -- this transform to apply for a constrained call
// pIntrinsicID [OUT] -- intrinsic ID (see enumeration in corinfo.h)
// for "traditional" jit intrinsics
// isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
// that is amenable to special downstream optimization opportunities
//
// Returns:
// IR tree to use in place of the call, or nullptr if the jit should treat
// the intrinsic call like a normal call.
//
// pIntrinsicID set to non-illegal value if the call is recognized as a
// traditional jit intrinsic, even if the intrinsic is not expanded.
//
// isSpecial set true if the expansion is subject to special
// optimizations later in the jit processing
//
// Notes:
// On success the IR tree may be a call to a different method or an inline
// sequence. If it is a call, then the intrinsic processing here is responsible
// for handling all the special cases, as upon return to impImportCall
// expanded intrinsics bypass most of the normal call processing.
//
// Intrinsics are generally not recognized in minopts and debug codegen.
//
// However, certain traditional intrinsics are identified as "must expand"
// if there is no fallback implementation to invoke; these must be handled
// in all codegen modes.
//
// New style intrinsics (where the fallback implementation is in IL) are
// identified as "must expand" if they are invoked from within their
// own method bodies.
//
GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
unsigned methodFlags,
int memberRef,
bool readonlyCall,
bool tailCall,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
CORINFO_THIS_TRANSFORM constraintCallThisTransform,
CorInfoIntrinsics* pIntrinsicID,
bool* isSpecialIntrinsic)
{
assert((methodFlags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0);
bool mustExpand = false;
bool isSpecial = false;
CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Illegal;
NamedIntrinsic ni = NI_Illegal;
if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
{
intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
}
if ((methodFlags & CORINFO_FLG_JIT_INTRINSIC) != 0)
{
// The recursive calls to Jit intrinsics are must-expand by convention.
mustExpand = mustExpand || gtIsRecursiveCall(method);
if (intrinsicID == CORINFO_INTRINSIC_Illegal)
{
ni = lookupNamedIntrinsic(method);
#ifdef FEATURE_HW_INTRINSICS
if (ni > NI_HW_INTRINSIC_START && ni < NI_HW_INTRINSIC_END)
{
return impHWIntrinsic(ni, method, sig, mustExpand);
}
#endif // FEATURE_HW_INTRINSICS
}
}
*pIntrinsicID = intrinsicID;
#ifndef _TARGET_ARM_
genTreeOps interlockedOperator;
#endif
if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
{
// must be done regardless of DbgCode and MinOpts
return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
}
#ifdef _TARGET_64BIT_
if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
{
// must be done regardless of DbgCode and MinOpts
return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
}
#else
assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
#endif
GenTree* retNode = nullptr;
// Under debug and minopts, only expand what is required.
if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
{
*pIntrinsicID = CORINFO_INTRINSIC_Illegal;
return retNode;
}
var_types callType = JITtype2varType(sig->retType);
/* First do the intrinsics which are always smaller than a call */
switch (intrinsicID)
{
GenTree* op1;
GenTree* op2;
case CORINFO_INTRINSIC_Sin:
case CORINFO_INTRINSIC_Cbrt:
case CORINFO_INTRINSIC_Sqrt:
case CORINFO_INTRINSIC_Abs:
case CORINFO_INTRINSIC_Cos:
case CORINFO_INTRINSIC_Round:
case CORINFO_INTRINSIC_Cosh:
case CORINFO_INTRINSIC_Sinh:
case CORINFO_INTRINSIC_Tan:
case CORINFO_INTRINSIC_Tanh:
case CORINFO_INTRINSIC_Asin:
case CORINFO_INTRINSIC_Asinh:
case CORINFO_INTRINSIC_Acos:
case CORINFO_INTRINSIC_Acosh:
case CORINFO_INTRINSIC_Atan:
case CORINFO_INTRINSIC_Atan2:
case CORINFO_INTRINSIC_Atanh:
case CORINFO_INTRINSIC_Log10:
case CORINFO_INTRINSIC_Pow:
case CORINFO_INTRINSIC_Exp:
case CORINFO_INTRINSIC_Ceiling:
case CORINFO_INTRINSIC_Floor:
retNode = impMathIntrinsic(method, sig, callType, intrinsicID, tailCall);
break;
#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
// TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
// Note that CORINFO_INTRINSIC_InterlockedAdd32/64 are not actually used.
// Anyway, we can import them as XADD and leave it to lowering/codegen to perform
// whatever optimizations may arise from the fact that result value is not used.
case CORINFO_INTRINSIC_InterlockedAdd32:
case CORINFO_INTRINSIC_InterlockedXAdd32:
interlockedOperator = GT_XADD;
goto InterlockedBinOpCommon;
case CORINFO_INTRINSIC_InterlockedXchg32:
interlockedOperator = GT_XCHG;
goto InterlockedBinOpCommon;
#ifdef _TARGET_64BIT_
case CORINFO_INTRINSIC_InterlockedAdd64:
case CORINFO_INTRINSIC_InterlockedXAdd64:
interlockedOperator = GT_XADD;
goto InterlockedBinOpCommon;
case CORINFO_INTRINSIC_InterlockedXchg64:
interlockedOperator = GT_XCHG;
goto InterlockedBinOpCommon;
#endif // _TARGET_64BIT_
InterlockedBinOpCommon:
assert(callType != TYP_STRUCT);
assert(sig->numArgs == 2);
op2 = impPopStack().val;
op1 = impPopStack().val;
// This creates:
// val
// XAdd
// addr
// field (for example)
//
// In the case where the first argument is the address of a local, we might
// want to make this *not* make the var address-taken -- but atomic instructions
// on a local are probably pretty useless anyway, so we probably don't care.
op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
retNode = op1;
break;
#endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
case CORINFO_INTRINSIC_MemoryBarrier:
assert(sig->numArgs == 0);
op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
retNode = op1;
break;
#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
// TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
case CORINFO_INTRINSIC_InterlockedCmpXchg32:
#ifdef _TARGET_64BIT_
case CORINFO_INTRINSIC_InterlockedCmpXchg64:
#endif
{
assert(callType != TYP_STRUCT);
assert(sig->numArgs == 3);
GenTree* op3;
op3 = impPopStack().val; // comparand
op2 = impPopStack().val; // value
op1 = impPopStack().val; // location
GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
retNode = node;
break;
}
#endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
case CORINFO_INTRINSIC_StringLength:
op1 = impPopStack().val;
if (!opts.MinOpts() && !opts.compDbgCode)
{
GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen);
op1 = arrLen;
}
else
{
/* Create the expression "*(str_addr + stringLengthOffset)" */
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
gtNewIconNode(OFFSETOF__CORINFO_String__stringLen, TYP_I_IMPL));
op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
}
// Getting the length of a null string should throw
op1->gtFlags |= GTF_EXCEPT;
retNode = op1;
break;
case CORINFO_INTRINSIC_StringGetChar:
op2 = impPopStack().val;
op1 = impPopStack().val;
op1 = gtNewIndexRef(TYP_USHORT, op1, op2);
op1->gtFlags |= GTF_INX_STRING_LAYOUT;
retNode = op1;
break;
case CORINFO_INTRINSIC_InitializeArray:
retNode = impInitializeArrayIntrinsic(sig);
break;
case CORINFO_INTRINSIC_Array_Address:
case CORINFO_INTRINSIC_Array_Get:
case CORINFO_INTRINSIC_Array_Set:
retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
break;
case CORINFO_INTRINSIC_GetTypeFromHandle:
op1 = impStackTop(0).val;
if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
{
op1 = impPopStack().val;
// Change call to return RuntimeType directly.
op1->gtType = TYP_REF;
retNode = op1;
}
// Call the regular function.
break;
case CORINFO_INTRINSIC_RTH_GetValueInternal:
op1 = impStackTop(0).val;
if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
{
// Old tree
// Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
//
// New tree
// TreeToGetNativeTypeHandle
// Remove call to helper and return the native TypeHandle pointer that was the parameter
// to that helper.
op1 = impPopStack().val;
// Get native TypeHandle argument to old helper
op1 = op1->gtCall.gtCallArgs;
assert(op1->OperIsList());
assert(op1->gtOp.gtOp2 == nullptr);
op1 = op1->gtOp.gtOp1;
retNode = op1;
}
// Call the regular function.
break;
case CORINFO_INTRINSIC_Object_GetType:
{
JITDUMP("\n impIntrinsic: call to Object.GetType\n");
op1 = impStackTop(0).val;
// If we're calling GetType on a boxed value, just get the type directly.
if (op1->IsBoxedValue())
{
JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n");
// Try and clean up the box. Obtain the handle we
// were going to pass to the newobj.
GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE);
if (boxTypeHandle != nullptr)
{
// Note we don't need to play the TYP_STRUCT games here like
// we do for LDTOKEN since the return value of this operator is Type,
// not RuntimeTypeHandle.
impPopStack();
GenTreeArgList* helperArgs = gtNewArgList(boxTypeHandle);
GenTree* runtimeType =
gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
retNode = runtimeType;
}
}
// If we have a constrained callvirt with a "box this" transform
// we know we have a value class and hence an exact type.
//
// If so, instead of boxing and then extracting the type, just
// construct the type directly.
if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) &&
(constraintCallThisTransform == CORINFO_BOX_THIS))
{
// Ensure this is one of the simple box cases (in particular, rule out nullables).
const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass);
const bool isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX);
if (isSafeToOptimize)
{
JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n");
impPopStack();
GenTree* typeHandleOp =
impTokenToHandle(pConstrainedResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
if (typeHandleOp == nullptr)
{
assert(compDonotInline());
return nullptr;
}
GenTreeArgList* helperArgs = gtNewArgList(typeHandleOp);
GenTree* runtimeType =
gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
retNode = runtimeType;
}
}
#ifdef DEBUG
if (retNode != nullptr)
{
JITDUMP("Optimized result for call to GetType is\n");
if (verbose)
{
gtDispTree(retNode);
}
}
#endif
// Else expand as an intrinsic, unless the call is constrained,
// in which case we defer expansion to allow impImportCall to do the
// special constraint processing.
if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr))
{
JITDUMP("Expanding as special intrinsic\n");
impPopStack();
op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
// Set the CALL flag to indicate that the operator is implemented by a call.
// Set also the EXCEPTION flag because the native implementation of
// CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
retNode = op1;
// Might be further optimizable, so arrange to leave a mark behind
isSpecial = true;
}
if (retNode == nullptr)
{
JITDUMP("Leaving as normal call\n");
// Might be further optimizable, so arrange to leave a mark behind
isSpecial = true;
}
break;
}
// Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field
// in a value type. The canonical example of this is Span<T>. In effect this is just a
// substitution. The parameter byref will be assigned into the newly allocated object.
case CORINFO_INTRINSIC_ByReference_Ctor:
{
// Remove call to constructor and directly assign the byref passed
// to the call to the first slot of the ByReference struct.
op1 = impPopStack().val;
GenTree* thisptr = newobjThis;
CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0);
GenTree* assign = gtNewAssignNode(field, op1);
GenTree* byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
assert(byReferenceStruct != nullptr);
impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
retNode = assign;
break;
}
// Implement ptr value getter for ByReference struct.
case CORINFO_INTRINSIC_ByReference_Value:
{
op1 = impPopStack().val;
CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0);
retNode = field;
break;
}
case CORINFO_INTRINSIC_Span_GetItem:
case CORINFO_INTRINSIC_ReadOnlySpan_GetItem:
{
// We have the index and a pointer to the Span<T> 's' on the stack. Expand to:
//
// For Span<T>
// Comma
// BoundsCheck(index, s->_length)
// s->_pointer + index * sizeof(T)
//
// For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref
//
// Signature should show one class type parameter, which
// we need to examine.
assert(sig->sigInst.classInstCount == 1);
CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
const unsigned elemSize = info.compCompHnd->getClassSize(spanElemHnd);
assert(elemSize > 0);
const bool isReadOnly = (intrinsicID == CORINFO_INTRINSIC_ReadOnlySpan_GetItem);
JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "",
info.compCompHnd->getClassName(spanElemHnd), elemSize);
GenTree* index = impPopStack().val;
GenTree* ptrToSpan = impPopStack().val;
GenTree* indexClone = nullptr;
GenTree* ptrToSpanClone = nullptr;
#if defined(DEBUG)
if (verbose)
{
printf("with ptr-to-span\n");