579 changes: 287 additions & 292 deletions llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -1966,233 +1966,9 @@ static unsigned getRegisterWidth(const MCOperandInfo &Info) {
llvm_unreachable("Unknown register class!");
}

void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
X86MCInstLower MCInstLowering(*MF, *this);
const X86RegisterInfo *RI =
MF->getSubtarget<X86Subtarget>().getRegisterInfo();

// Add a comment about EVEX-2-VEX compression for AVX-512 instrs that
// are compressed from EVEX encoding to VEX encoding.
if (TM.Options.MCOptions.ShowMCEncoding) {
if (MI->getAsmPrinterFlags() & X86::AC_EVEX_2_VEX)
OutStreamer->AddComment("EVEX TO VEX Compression ", false);
}

static void addConstantComments(const MachineInstr *MI,
MCStreamer &OutStreamer) {
switch (MI->getOpcode()) {
case TargetOpcode::DBG_VALUE:
llvm_unreachable("Should be handled target independently");

// Emit nothing here but a comment if we can.
case X86::Int_MemBarrier:
OutStreamer->emitRawComment("MEMBARRIER");
return;

case X86::EH_RETURN:
case X86::EH_RETURN64: {
// Lower these as normal, but add some comments.
Register Reg = MI->getOperand(0).getReg();
OutStreamer->AddComment(StringRef("eh_return, addr: %") +
X86ATTInstPrinter::getRegisterName(Reg));
break;
}
case X86::CLEANUPRET: {
// Lower these as normal, but add some comments.
OutStreamer->AddComment("CLEANUPRET");
break;
}

case X86::CATCHRET: {
// Lower these as normal, but add some comments.
OutStreamer->AddComment("CATCHRET");
break;
}

case X86::ENDBR32:
case X86::ENDBR64: {
// CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
// -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
// non-empty. If MI is the initial ENDBR, place the
// __patchable_function_entries label after ENDBR.
if (CurrentPatchableFunctionEntrySym &&
CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
MI == &MF->front().front()) {
MCInst Inst;
MCInstLowering.Lower(MI, Inst);
EmitAndCountInstruction(Inst);
CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
return;
}
break;
}

case X86::TAILJMPr:
case X86::TAILJMPm:
case X86::TAILJMPd:
case X86::TAILJMPd_CC:
case X86::TAILJMPr64:
case X86::TAILJMPm64:
case X86::TAILJMPd64:
case X86::TAILJMPd64_CC:
case X86::TAILJMPr64_REX:
case X86::TAILJMPm64_REX:
// Lower these as normal, but add some comments.
OutStreamer->AddComment("TAILCALL");
break;

case X86::TLS_addr32:
case X86::TLS_addr64:
case X86::TLS_base_addr32:
case X86::TLS_base_addr64:
return LowerTlsAddr(MCInstLowering, *MI);

case X86::MOVPC32r: {
// This is a pseudo op for a two instruction sequence with a label, which
// looks like:
// call "L1$pb"
// "L1$pb":
// popl %esi

// Emit the call.
MCSymbol *PICBase = MF->getPICBaseSymbol();
// FIXME: We would like an efficient form for this, so we don't have to do a
// lot of extra uniquing.
EmitAndCountInstruction(
MCInstBuilder(X86::CALLpcrel32)
.addExpr(MCSymbolRefExpr::create(PICBase, OutContext)));

const X86FrameLowering *FrameLowering =
MF->getSubtarget<X86Subtarget>().getFrameLowering();
bool hasFP = FrameLowering->hasFP(*MF);

// TODO: This is needed only if we require precise CFA.
bool HasActiveDwarfFrame = OutStreamer->getNumFrameInfos() &&
!OutStreamer->getDwarfFrameInfos().back().End;

int stackGrowth = -RI->getSlotSize();

if (HasActiveDwarfFrame && !hasFP) {
OutStreamer->emitCFIAdjustCfaOffset(-stackGrowth);
}

// Emit the label.
OutStreamer->emitLabel(PICBase);

// popl $reg
EmitAndCountInstruction(
MCInstBuilder(X86::POP32r).addReg(MI->getOperand(0).getReg()));

if (HasActiveDwarfFrame && !hasFP) {
OutStreamer->emitCFIAdjustCfaOffset(stackGrowth);
}
return;
}

case X86::ADD32ri: {
// Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
break;

// Okay, we have something like:
// EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL)

// For this, we want to print something like:
// MYGLOBAL + (. - PICBASE)
// However, we can't generate a ".", so just emit a new label here and refer
// to it.
MCSymbol *DotSym = OutContext.createTempSymbol();
OutStreamer->emitLabel(DotSym);

// Now that we have emitted the label, lower the complex operand expression.
MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));

const MCExpr *DotExpr = MCSymbolRefExpr::create(DotSym, OutContext);
const MCExpr *PICBase =
MCSymbolRefExpr::create(MF->getPICBaseSymbol(), OutContext);
DotExpr = MCBinaryExpr::createSub(DotExpr, PICBase, OutContext);

DotExpr = MCBinaryExpr::createAdd(
MCSymbolRefExpr::create(OpSym, OutContext), DotExpr, OutContext);

EmitAndCountInstruction(MCInstBuilder(X86::ADD32ri)
.addReg(MI->getOperand(0).getReg())
.addReg(MI->getOperand(1).getReg())
.addExpr(DotExpr));
return;
}
case TargetOpcode::STATEPOINT:
return LowerSTATEPOINT(*MI, MCInstLowering);

case TargetOpcode::FAULTING_OP:
return LowerFAULTING_OP(*MI, MCInstLowering);

case TargetOpcode::FENTRY_CALL:
return LowerFENTRY_CALL(*MI, MCInstLowering);

case TargetOpcode::PATCHABLE_OP:
return LowerPATCHABLE_OP(*MI, MCInstLowering);

case TargetOpcode::STACKMAP:
return LowerSTACKMAP(*MI);

case TargetOpcode::PATCHPOINT:
return LowerPATCHPOINT(*MI, MCInstLowering);

case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
return LowerPATCHABLE_FUNCTION_ENTER(*MI, MCInstLowering);

case TargetOpcode::PATCHABLE_RET:
return LowerPATCHABLE_RET(*MI, MCInstLowering);

case TargetOpcode::PATCHABLE_TAIL_CALL:
return LowerPATCHABLE_TAIL_CALL(*MI, MCInstLowering);

case TargetOpcode::PATCHABLE_EVENT_CALL:
return LowerPATCHABLE_EVENT_CALL(*MI, MCInstLowering);

case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
return LowerPATCHABLE_TYPED_EVENT_CALL(*MI, MCInstLowering);

case X86::MORESTACK_RET:
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
return;

case X86::MORESTACK_RET_RESTORE_R10:
// Return, then restore R10.
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
EmitAndCountInstruction(
MCInstBuilder(X86::MOV64rr).addReg(X86::R10).addReg(X86::RAX));
return;

case X86::SEH_PushReg:
case X86::SEH_SaveReg:
case X86::SEH_SaveXMM:
case X86::SEH_StackAlloc:
case X86::SEH_StackAlign:
case X86::SEH_SetFrame:
case X86::SEH_PushFrame:
case X86::SEH_EndPrologue:
EmitSEHInstruction(MI);
return;

case X86::SEH_Epilogue: {
assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
MachineBasicBlock::const_iterator MBBI(MI);
// Check if preceded by a call and emit nop if so.
for (MBBI = PrevCrossBBInst(MBBI);
MBBI != MachineBasicBlock::const_iterator();
MBBI = PrevCrossBBInst(MBBI)) {
// Conservatively assume that pseudo instructions don't emit code and keep
// looking for a call. We may emit an unnecessary nop in some cases.
if (!MBBI->isPseudo()) {
if (MBBI->isCall())
EmitAndCountInstruction(MCInstBuilder(X86::NOOP));
break;
}
}
return;
}

// Lower PSHUFB and VPERMILP normally but add a comment if we can find
// a constant shuffle mask. We won't be able to do this at the MC layer
// because the mask isn't an immediate.
@@ -2208,38 +1984,27 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
case X86::VPSHUFBZrm:
case X86::VPSHUFBZrmk:
case X86::VPSHUFBZrmkz: {
if (!OutStreamer->isVerboseAsm())
break;
unsigned SrcIdx, MaskIdx;
switch (MI->getOpcode()) {
default: llvm_unreachable("Invalid opcode");
case X86::PSHUFBrm:
case X86::VPSHUFBrm:
case X86::VPSHUFBYrm:
case X86::VPSHUFBZ128rm:
case X86::VPSHUFBZ256rm:
case X86::VPSHUFBZrm:
SrcIdx = 1; MaskIdx = 5; break;
case X86::VPSHUFBZ128rmkz:
case X86::VPSHUFBZ256rmkz:
case X86::VPSHUFBZrmkz:
SrcIdx = 2; MaskIdx = 6; break;
case X86::VPSHUFBZ128rmk:
case X86::VPSHUFBZ256rmk:
case X86::VPSHUFBZrmk:
SrcIdx = 3; MaskIdx = 7; break;
unsigned SrcIdx = 1;
if (X86II::isKMasked(MI->getDesc().TSFlags)) {
// Skip mask operand.
++SrcIdx;
if (X86II::isKMergeMasked(MI->getDesc().TSFlags)) {
// Skip passthru operand.
++SrcIdx;
}
}
unsigned MaskIdx = SrcIdx + 1 + X86::AddrDisp;

assert(MI->getNumOperands() >= 6 &&
"We should always have at least 6 operands!");
assert(MI->getNumOperands() >= (SrcIdx + 1 + X86::AddrNumOperands) &&
"Unexpected number of operands!");

const MachineOperand &MaskOp = MI->getOperand(MaskIdx);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
SmallVector<int, 64> Mask;
DecodePSHUFBMask(C, Width, Mask);
if (!Mask.empty())
OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
OutStreamer.AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
}
break;
}
@@ -2266,9 +2031,6 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
case X86::VPERMILPDZrm:
case X86::VPERMILPDZrmk:
case X86::VPERMILPDZrmkz: {
if (!OutStreamer->isVerboseAsm())
break;
unsigned SrcIdx, MaskIdx;
unsigned ElSize;
switch (MI->getOpcode()) {
default: llvm_unreachable("Invalid opcode");
@@ -2277,41 +2039,50 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
case X86::VPERMILPSZ128rm:
case X86::VPERMILPSZ256rm:
case X86::VPERMILPSZrm:
SrcIdx = 1; MaskIdx = 5; ElSize = 32; break;
case X86::VPERMILPSZ128rmkz:
case X86::VPERMILPSZ256rmkz:
case X86::VPERMILPSZrmkz:
SrcIdx = 2; MaskIdx = 6; ElSize = 32; break;
case X86::VPERMILPSZ128rmk:
case X86::VPERMILPSZ256rmk:
case X86::VPERMILPSZrmk:
SrcIdx = 3; MaskIdx = 7; ElSize = 32; break;
ElSize = 32;
break;
case X86::VPERMILPDrm:
case X86::VPERMILPDYrm:
case X86::VPERMILPDZ128rm:
case X86::VPERMILPDZ256rm:
case X86::VPERMILPDZrm:
SrcIdx = 1; MaskIdx = 5; ElSize = 64; break;
case X86::VPERMILPDZ128rmkz:
case X86::VPERMILPDZ256rmkz:
case X86::VPERMILPDZrmkz:
SrcIdx = 2; MaskIdx = 6; ElSize = 64; break;
case X86::VPERMILPDZ128rmk:
case X86::VPERMILPDZ256rmk:
case X86::VPERMILPDZrmk:
SrcIdx = 3; MaskIdx = 7; ElSize = 64; break;
ElSize = 64;
break;
}

assert(MI->getNumOperands() >= 6 &&
"We should always have at least 6 operands!");
unsigned SrcIdx = 1;
if (X86II::isKMasked(MI->getDesc().TSFlags)) {
// Skip mask operand.
++SrcIdx;
if (X86II::isKMergeMasked(MI->getDesc().TSFlags)) {
// Skip passthru operand.
++SrcIdx;
}
}
unsigned MaskIdx = SrcIdx + 1 + X86::AddrDisp;

assert(MI->getNumOperands() >= (SrcIdx + 1 + X86::AddrNumOperands) &&
"Unexpected number of operands!");

const MachineOperand &MaskOp = MI->getOperand(MaskIdx);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
SmallVector<int, 16> Mask;
DecodeVPERMILPMask(C, ElSize, Width, Mask);
if (!Mask.empty())
OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
OutStreamer.AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
}
break;
}
@@ -2320,10 +2091,8 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
case X86::VPERMIL2PSrm:
case X86::VPERMIL2PDYrm:
case X86::VPERMIL2PSYrm: {
if (!OutStreamer->isVerboseAsm())
break;
assert(MI->getNumOperands() >= 8 &&
"We should always have at least 8 operands!");
assert(MI->getNumOperands() >= (3 + X86::AddrNumOperands + 1) &&
"Unexpected number of operands!");

const MachineOperand &CtrlOp = MI->getOperand(MI->getNumOperands() - 1);
if (!CtrlOp.isImm())
@@ -2336,47 +2105,43 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
case X86::VPERMIL2PDrm: case X86::VPERMIL2PDYrm: ElSize = 64; break;
}

const MachineOperand &MaskOp = MI->getOperand(6);
const MachineOperand &MaskOp = MI->getOperand(3 + X86::AddrDisp);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
SmallVector<int, 16> Mask;
DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Width, Mask);
if (!Mask.empty())
OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask));
OutStreamer.AddComment(getShuffleComment(MI, 1, 2, Mask));
}
break;
}

case X86::VPPERMrrm: {
if (!OutStreamer->isVerboseAsm())
break;
assert(MI->getNumOperands() >= 7 &&
"We should always have at least 7 operands!");
assert(MI->getNumOperands() >= (3 + X86::AddrNumOperands) &&
"Unexpected number of operands!");

const MachineOperand &MaskOp = MI->getOperand(6);
const MachineOperand &MaskOp = MI->getOperand(3 + X86::AddrDisp);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
SmallVector<int, 16> Mask;
DecodeVPPERMMask(C, Width, Mask);
if (!Mask.empty())
OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask));
OutStreamer.AddComment(getShuffleComment(MI, 1, 2, Mask));
}
break;
}

case X86::MMX_MOVQ64rm: {
if (!OutStreamer->isVerboseAsm())
break;
if (MI->getNumOperands() <= 4)
break;
if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) {
assert(MI->getNumOperands() == (1 + X86::AddrNumOperands) &&
"Unexpected number of operands!");
if (auto *C = getConstantFromPool(*MI, MI->getOperand(1 + X86::AddrDisp))) {
std::string Comment;
raw_string_ostream CS(Comment);
const MachineOperand &DstOp = MI->getOperand(0);
CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
if (auto *CF = dyn_cast<ConstantFP>(C)) {
CS << "0x" << CF->getValueAPF().bitcastToAPInt().toString(16, false);
OutStreamer->AddComment(CS.str());
OutStreamer.AddComment(CS.str());
}
}
break;
@@ -2427,11 +2192,9 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
case X86::VBROADCASTI64X2Z128rm:
case X86::VBROADCASTI64X2rm:
case X86::VBROADCASTI64X4rm:
if (!OutStreamer->isVerboseAsm())
break;
if (MI->getNumOperands() <= 4)
break;
if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) {
assert(MI->getNumOperands() >= (1 + X86::AddrNumOperands) &&
"Unexpected number of operands!");
if (auto *C = getConstantFromPool(*MI, MI->getOperand(1 + X86::AddrDisp))) {
int NumLanes = 1;
// Override NumLanes for the broadcast instructions.
switch (MI->getOpcode()) {
@@ -2473,7 +2236,7 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
}
}
CS << "]";
OutStreamer->AddComment(CS.str());
OutStreamer.AddComment(CS.str());
} else if (auto *CV = dyn_cast<ConstantVector>(C)) {
CS << "<";
for (int l = 0; l != NumLanes; ++l) {
@@ -2485,10 +2248,11 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
}
}
CS << ">";
OutStreamer->AddComment(CS.str());
OutStreamer.AddComment(CS.str());
}
}
break;

case X86::MOVDDUPrm:
case X86::VMOVDDUPrm:
case X86::VMOVDDUPZ128rm:
@@ -2520,11 +2284,9 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
case X86::VPBROADCASTWZ128rm:
case X86::VPBROADCASTWZ256rm:
case X86::VPBROADCASTWZrm:
if (!OutStreamer->isVerboseAsm())
break;
if (MI->getNumOperands() <= 4)
break;
if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) {
assert(MI->getNumOperands() >= (1 + X86::AddrNumOperands) &&
"Unexpected number of operands!");
if (auto *C = getConstantFromPool(*MI, MI->getOperand(1 + X86::AddrDisp))) {
int NumElts;
switch (MI->getOpcode()) {
default: llvm_unreachable("Invalid opcode");
@@ -2572,9 +2334,242 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
printConstant(C, CS);
}
CS << "]";
OutStreamer->AddComment(CS.str());
OutStreamer.AddComment(CS.str());
}
}
}
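
// For orientation, the comments added above show up in verbose asm roughly
// like the following (illustrative only; the exact text depends on the
// constant, the register width and the target's comment prefix):
//   vpshufb mask(%rip), %xmm0, %xmm0   # xmm0 = xmm0[1,0,3,2,...]
//   vbroadcastss cst(%rip), %xmm1      # xmm1 = [1,1,1,1]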

void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
X86MCInstLower MCInstLowering(*MF, *this);
const X86RegisterInfo *RI =
MF->getSubtarget<X86Subtarget>().getRegisterInfo();

// Add a comment about EVEX-2-VEX compression for AVX-512 instrs that
// are compressed from EVEX encoding to VEX encoding.
if (TM.Options.MCOptions.ShowMCEncoding) {
if (MI->getAsmPrinterFlags() & X86::AC_EVEX_2_VEX)
OutStreamer->AddComment("EVEX TO VEX Compression ", false);
}
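// With -show-mc-encoding the extra comment appears next to the encoding
// bytes, along these lines (illustrative):
//   vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]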

// Add comments for values loaded from constant pool.
if (OutStreamer->isVerboseAsm())
addConstantComments(MI, *OutStreamer);

switch (MI->getOpcode()) {
case TargetOpcode::DBG_VALUE:
llvm_unreachable("Should be handled target independently");

// Emit nothing here but a comment if we can.
case X86::Int_MemBarrier:
OutStreamer->emitRawComment("MEMBARRIER");
return;

case X86::EH_RETURN:
case X86::EH_RETURN64: {
// Lower these as normal, but add some comments.
Register Reg = MI->getOperand(0).getReg();
OutStreamer->AddComment(StringRef("eh_return, addr: %") +
X86ATTInstPrinter::getRegisterName(Reg));
break;
}
case X86::CLEANUPRET: {
// Lower these as normal, but add some comments.
OutStreamer->AddComment("CLEANUPRET");
break;
}

case X86::CATCHRET: {
// Lower these as normal, but add some comments.
OutStreamer->AddComment("CATCHRET");
break;
}

case X86::ENDBR32:
case X86::ENDBR64: {
// CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
// -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
// non-empty. If MI is the initial ENDBR, place the
// __patchable_function_entries label after ENDBR.
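// Sketch of the resulting entry layout for -fpatchable-function-entry=N,0
// (label name illustrative):
//   foo:
//     endbr64
//   .Lpatch0:   <- referenced from __patchable_function_entries
//     <N patchable NOPs emitted for PATCHABLE_FUNCTION_ENTER>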
if (CurrentPatchableFunctionEntrySym &&
CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
MI == &MF->front().front()) {
MCInst Inst;
MCInstLowering.Lower(MI, Inst);
EmitAndCountInstruction(Inst);
CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
return;
}
break;
}

case X86::TAILJMPr:
case X86::TAILJMPm:
case X86::TAILJMPd:
case X86::TAILJMPd_CC:
case X86::TAILJMPr64:
case X86::TAILJMPm64:
case X86::TAILJMPd64:
case X86::TAILJMPd64_CC:
case X86::TAILJMPr64_REX:
case X86::TAILJMPm64_REX:
// Lower these as normal, but add some comments.
OutStreamer->AddComment("TAILCALL");
break;

case X86::TLS_addr32:
case X86::TLS_addr64:
case X86::TLS_base_addr32:
case X86::TLS_base_addr64:
return LowerTlsAddr(MCInstLowering, *MI);

case X86::MOVPC32r: {
// This is a pseudo op for a two instruction sequence with a label, which
// looks like:
// call "L1$pb"
// "L1$pb":
// popl %esi

// Emit the call.
MCSymbol *PICBase = MF->getPICBaseSymbol();
// FIXME: We would like an efficient form for this, so we don't have to do a
// lot of extra uniquing.
EmitAndCountInstruction(
MCInstBuilder(X86::CALLpcrel32)
.addExpr(MCSymbolRefExpr::create(PICBase, OutContext)));

const X86FrameLowering *FrameLowering =
MF->getSubtarget<X86Subtarget>().getFrameLowering();
bool hasFP = FrameLowering->hasFP(*MF);

// TODO: This is needed only if we require precise CFA.
bool HasActiveDwarfFrame = OutStreamer->getNumFrameInfos() &&
!OutStreamer->getDwarfFrameInfos().back().End;

int stackGrowth = -RI->getSlotSize();
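// Why the CFA adjustments below (sketch, assuming the usual downward-growing
// stack): the CALL pushes a return address, so until the matching POP the
// CFA sits one slot further from %esp; without a frame pointer that needs an
// explicit .cfi_adjust_cfa_offset in each direction.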

if (HasActiveDwarfFrame && !hasFP) {
OutStreamer->emitCFIAdjustCfaOffset(-stackGrowth);
}

// Emit the label.
OutStreamer->emitLabel(PICBase);

// popl $reg
EmitAndCountInstruction(
MCInstBuilder(X86::POP32r).addReg(MI->getOperand(0).getReg()));

if (HasActiveDwarfFrame && !hasFP) {
OutStreamer->emitCFIAdjustCfaOffset(stackGrowth);
}
return;
}

case X86::ADD32ri: {
// Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
break;

// Okay, we have something like:
// EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL)

// For this, we want to print something like:
// MYGLOBAL + (. - PICBASE)
// However, we can't generate a ".", so just emit a new label here and refer
// to it.
MCSymbol *DotSym = OutContext.createTempSymbol();
OutStreamer->emitLabel(DotSym);

// Now that we have emitted the label, lower the complex operand expression.
MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));

const MCExpr *DotExpr = MCSymbolRefExpr::create(DotSym, OutContext);
const MCExpr *PICBase =
MCSymbolRefExpr::create(MF->getPICBaseSymbol(), OutContext);
DotExpr = MCBinaryExpr::createSub(DotExpr, PICBase, OutContext);

DotExpr = MCBinaryExpr::createAdd(
MCSymbolRefExpr::create(OpSym, OutContext), DotExpr, OutContext);

EmitAndCountInstruction(MCInstBuilder(X86::ADD32ri)
.addReg(MI->getOperand(0).getReg())
.addReg(MI->getOperand(1).getReg())
.addExpr(DotExpr));
return;
}
case TargetOpcode::STATEPOINT:
return LowerSTATEPOINT(*MI, MCInstLowering);

case TargetOpcode::FAULTING_OP:
return LowerFAULTING_OP(*MI, MCInstLowering);

case TargetOpcode::FENTRY_CALL:
return LowerFENTRY_CALL(*MI, MCInstLowering);

case TargetOpcode::PATCHABLE_OP:
return LowerPATCHABLE_OP(*MI, MCInstLowering);

case TargetOpcode::STACKMAP:
return LowerSTACKMAP(*MI);

case TargetOpcode::PATCHPOINT:
return LowerPATCHPOINT(*MI, MCInstLowering);

case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
return LowerPATCHABLE_FUNCTION_ENTER(*MI, MCInstLowering);

case TargetOpcode::PATCHABLE_RET:
return LowerPATCHABLE_RET(*MI, MCInstLowering);

case TargetOpcode::PATCHABLE_TAIL_CALL:
return LowerPATCHABLE_TAIL_CALL(*MI, MCInstLowering);

case TargetOpcode::PATCHABLE_EVENT_CALL:
return LowerPATCHABLE_EVENT_CALL(*MI, MCInstLowering);

case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
return LowerPATCHABLE_TYPED_EVENT_CALL(*MI, MCInstLowering);

case X86::MORESTACK_RET:
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
return;

case X86::MORESTACK_RET_RESTORE_R10:
// Return, then restore R10.
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
EmitAndCountInstruction(
MCInstBuilder(X86::MOV64rr).addReg(X86::R10).addReg(X86::RAX));
return;

case X86::SEH_PushReg:
case X86::SEH_SaveReg:
case X86::SEH_SaveXMM:
case X86::SEH_StackAlloc:
case X86::SEH_StackAlign:
case X86::SEH_SetFrame:
case X86::SEH_PushFrame:
case X86::SEH_EndPrologue:
EmitSEHInstruction(MI);
return;

case X86::SEH_Epilogue: {
assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
MachineBasicBlock::const_iterator MBBI(MI);
// Check if preceded by a call and emit nop if so.
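// Rationale (sketch): the Win64 unwinder treats a return address that points
// at the first byte of the epilogue as being in the epilogue, so a call that
// falls through directly into the epilogue gets a trailing NOP to keep its
// return address inside the function body proper.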
for (MBBI = PrevCrossBBInst(MBBI);
MBBI != MachineBasicBlock::const_iterator();
MBBI = PrevCrossBBInst(MBBI)) {
// Conservatively assume that pseudo instructions don't emit code and keep
// looking for a call. We may emit an unnecessary nop in some cases.
if (!MBBI->isPseudo()) {
if (MBBI->isCall())
EmitAndCountInstruction(MCInstBuilder(X86::NOOP));
break;
}
}
return;
}
}

MCInst TmpInst;
MCInstLowering.Lower(MI, TmpInst);