| @@ -0,0 +1,81 @@ | ||
| // REQUIRES: x86 | ||
| // RUN: llvm-mc -filetype=obj -triple=i386-unknown-linux %s -o %t1.o | ||
| // RUN: llvm-mc -filetype=obj -triple=i386-unknown-linux %p/Inputs/shared.s -o %t2.o | ||
| // RUN: ld.lld -shared %t2.o -o %t2.so | ||
|
|
||
| // RUN: ld.lld %t1.o %t2.so -o %t.exe -z retpolineplt | ||
| // RUN: llvm-objdump -d -s %t.exe | FileCheck %s | ||
|
|
||
| // CHECK: Disassembly of section .plt: | ||
| // CHECK-NEXT: .plt: | ||
| // CHECK-NEXT: 11010: ff 35 04 20 01 00 pushl 73732 | ||
| // CHECK-NEXT: 11016: 50 pushl %eax | ||
| // CHECK-NEXT: 11017: a1 08 20 01 00 movl 73736, %eax | ||
| // CHECK-NEXT: 1101c: e8 0f 00 00 00 calll 15 <.plt+0x20> | ||
| // CHECK-NEXT: 11021: f3 90 pause | ||
| // CHECK-NEXT: 11023: 0f ae e8 lfence | ||
| // CHECK-NEXT: 11026: eb f9 jmp -7 <.plt+0x11> | ||
| // CHECK-NEXT: 11028: cc int3 | ||
| // CHECK-NEXT: 11029: cc int3 | ||
| // CHECK-NEXT: 1102a: cc int3 | ||
| // CHECK-NEXT: 1102b: cc int3 | ||
| // CHECK-NEXT: 1102c: cc int3 | ||
| // CHECK-NEXT: 1102d: cc int3 | ||
| // CHECK-NEXT: 1102e: cc int3 | ||
| // CHECK-NEXT: 1102f: cc int3 | ||
| // CHECK-NEXT: 11030: 89 0c 24 movl %ecx, (%esp) | ||
| // CHECK-NEXT: 11033: 8b 4c 24 04 movl 4(%esp), %ecx | ||
| // CHECK-NEXT: 11037: 89 44 24 04 movl %eax, 4(%esp) | ||
| // CHECK-NEXT: 1103b: 89 c8 movl %ecx, %eax | ||
| // CHECK-NEXT: 1103d: 59 popl %ecx | ||
| // CHECK-NEXT: 1103e: c3 retl | ||
| // CHECK-NEXT: 1103f: cc int3 | ||
| // CHECK-NEXT: 11040: cc int3 | ||
| // CHECK-NEXT: 11041: cc int3 | ||
| // CHECK-NEXT: 11042: cc int3 | ||
| // CHECK-NEXT: 11043: cc int3 | ||
| // CHECK-NEXT: 11044: cc int3 | ||
| // CHECK-NEXT: 11045: cc int3 | ||
| // CHECK-NEXT: 11046: cc int3 | ||
| // CHECK-NEXT: 11047: cc int3 | ||
| // CHECK-NEXT: 11048: cc int3 | ||
| // CHECK-NEXT: 11049: cc int3 | ||
| // CHECK-NEXT: 1104a: cc int3 | ||
| // CHECK-NEXT: 1104b: cc int3 | ||
| // CHECK-NEXT: 1104c: cc int3 | ||
| // CHECK-NEXT: 1104d: cc int3 | ||
| // CHECK-NEXT: 1104e: cc int3 | ||
| // CHECK-NEXT: 1104f: cc int3 | ||
| // CHECK-NEXT: 11050: 50 pushl %eax | ||
| // CHECK-NEXT: 11051: a1 0c 20 01 00 movl 73740, %eax | ||
| // CHECK-NEXT: 11056: e8 d5 ff ff ff calll -43 <.plt+0x20> | ||
| // CHECK-NEXT: 1105b: e9 c1 ff ff ff jmp -63 <.plt+0x11> | ||
| // CHECK-NEXT: 11060: 68 00 00 00 00 pushl $0 | ||
| // CHECK-NEXT: 11065: e9 a6 ff ff ff jmp -90 <.plt> | ||
| // CHECK-NEXT: 1106a: cc int3 | ||
| // CHECK-NEXT: 1106b: cc int3 | ||
| // CHECK-NEXT: 1106c: cc int3 | ||
| // CHECK-NEXT: 1106d: cc int3 | ||
| // CHECK-NEXT: 1106e: cc int3 | ||
| // CHECK-NEXT: 1106f: cc int3 | ||
| // CHECK-NEXT: 11070: 50 pushl %eax | ||
| // CHECK-NEXT: 11071: a1 10 20 01 00 movl 73744, %eax | ||
| // CHECK-NEXT: 11076: e8 b5 ff ff ff calll -75 <.plt+0x20> | ||
| // CHECK-NEXT: 1107b: e9 a1 ff ff ff jmp -95 <.plt+0x11> | ||
| // CHECK-NEXT: 11080: 68 08 00 00 00 pushl $8 | ||
| // CHECK-NEXT: 11085: e9 86 ff ff ff jmp -122 <.plt> | ||
| // CHECK-NEXT: 1108a: cc int3 | ||
| // CHECK-NEXT: 1108b: cc int3 | ||
| // CHECK-NEXT: 1108c: cc int3 | ||
| // CHECK-NEXT: 1108d: cc int3 | ||
| // CHECK-NEXT: 1108e: cc int3 | ||
| // CHECK-NEXT: 1108f: cc int3 | ||
|
|
||
| // CHECK: Contents of section .got.plt: | ||
| // CHECK-NEXT: 00300100 00000000 00000000 60100100 | ||
| // CHECK-NEXT: 80100100 | ||
|
|
||
| .global _start | ||
| _start: | ||
| jmp bar@PLT | ||
| jmp zed@PLT |
| @@ -0,0 +1,62 @@ | ||
| // REQUIRES: x86 | ||
| // RUN: llvm-mc -filetype=obj -triple=i386-unknown-linux -position-independent %s -o %t1.o | ||
| // RUN: llvm-mc -filetype=obj -triple=i386-unknown-linux -position-independent %p/Inputs/shared.s -o %t2.o | ||
| // RUN: ld.lld -shared %t2.o -o %t2.so | ||
|
|
||
| // RUN: ld.lld %t1.o %t2.so -o %t.exe -z retpolineplt -pie | ||
| // RUN: llvm-objdump -d -s %t.exe | FileCheck %s | ||
|
|
||
| // CHECK: Disassembly of section .plt: | ||
| // CHECK-NEXT: .plt: | ||
| // CHECK-NEXT: 1010: ff b3 04 20 00 00 pushl 8196(%ebx) | ||
| // CHECK-NEXT: 1016: 50 pushl %eax | ||
| // CHECK-NEXT: 1017: 8b 83 08 20 00 00 movl 8200(%ebx), %eax | ||
| // CHECK-NEXT: 101d: e8 0e 00 00 00 calll 14 <.plt+0x20> | ||
| // CHECK-NEXT: 1022: f3 90 pause | ||
| // CHECK-NEXT: 1024: 0f ae e8 lfence | ||
| // CHECK-NEXT: 1027: eb f9 jmp -7 <.plt+0x12> | ||
| // CHECK-NEXT: 1029: cc int3 | ||
| // CHECK-NEXT: 102a: cc int3 | ||
| // CHECK-NEXT: 102b: cc int3 | ||
| // CHECK-NEXT: 102c: cc int3 | ||
| // CHECK-NEXT: 102d: cc int3 | ||
| // CHECK-NEXT: 102e: cc int3 | ||
| // CHECK-NEXT: 102f: cc int3 | ||
| // CHECK-NEXT: 1030: 89 0c 24 movl %ecx, (%esp) | ||
| // CHECK-NEXT: 1033: 8b 4c 24 04 movl 4(%esp), %ecx | ||
| // CHECK-NEXT: 1037: 89 44 24 04 movl %eax, 4(%esp) | ||
| // CHECK-NEXT: 103b: 89 c8 movl %ecx, %eax | ||
| // CHECK-NEXT: 103d: 59 popl %ecx | ||
| // CHECK-NEXT: 103e: c3 retl | ||
| // CHECK-NEXT: 103f: cc int3 | ||
| // CHECK-NEXT: 1040: 50 pushl %eax | ||
| // CHECK-NEXT: 1041: 8b 83 0c 20 00 00 movl 8204(%ebx), %eax | ||
| // CHECK-NEXT: 1047: e8 e4 ff ff ff calll -28 <.plt+0x20> | ||
| // CHECK-NEXT: 104c: e9 d1 ff ff ff jmp -47 <.plt+0x12> | ||
| // CHECK-NEXT: 1051: 68 00 00 00 00 pushl $0 | ||
| // CHECK-NEXT: 1056: e9 b5 ff ff ff jmp -75 <.plt> | ||
| // CHECK-NEXT: 105b: cc int3 | ||
| // CHECK-NEXT: 105c: cc int3 | ||
| // CHECK-NEXT: 105d: cc int3 | ||
| // CHECK-NEXT: 105e: cc int3 | ||
| // CHECK-NEXT: 105f: cc int3 | ||
| // CHECK-NEXT: 1060: 50 pushl %eax | ||
| // CHECK-NEXT: 1061: 8b 83 10 20 00 00 movl 8208(%ebx), %eax | ||
| // CHECK-NEXT: 1067: e8 c4 ff ff ff calll -60 <.plt+0x20> | ||
| // CHECK-NEXT: 106c: e9 b1 ff ff ff jmp -79 <.plt+0x12> | ||
| // CHECK-NEXT: 1071: 68 08 00 00 00 pushl $8 | ||
| // CHECK-NEXT: 1076: e9 95 ff ff ff jmp -107 <.plt> | ||
| // CHECK-NEXT: 107b: cc int3 | ||
| // CHECK-NEXT: 107c: cc int3 | ||
| // CHECK-NEXT: 107d: cc int3 | ||
| // CHECK-NEXT: 107e: cc int3 | ||
| // CHECK-NEXT: 107f: cc int3 | ||
|
|
||
| // CHECK: Contents of section .got.plt: | ||
| // CHECK-NEXT: 2000 00300000 00000000 00000000 51100000 | ||
| // CHECK-NEXT: 2010 71100000 | ||
|
|
||
| .global _start | ||
| _start: | ||
| jmp bar@PLT | ||
| jmp zed@PLT |
| @@ -0,0 +1,53 @@ | ||
| // REQUIRES: x86 | ||
| // RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-linux %s -o %t1.o | ||
| // RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-linux %p/Inputs/shared.s -o %t2.o | ||
| // RUN: ld.lld -shared %t2.o -o %t2.so | ||
|
|
||
| // RUN: ld.lld -shared %t1.o %t2.so -o %t.exe -z retpolineplt -z now | ||
| // RUN: llvm-objdump -d -s %t.exe | FileCheck %s | ||
|
|
||
| // CHECK: Disassembly of section .plt: | ||
| // CHECK-NEXT: .plt: | ||
| // CHECK-NEXT: 1010: e8 0b 00 00 00 callq 11 <.plt+0x10> | ||
| // CHECK-NEXT: 1015: f3 90 pause | ||
| // CHECK-NEXT: 1017: 0f ae e8 lfence | ||
| // CHECK-NEXT: 101a: eb f9 jmp -7 <.plt+0x5> | ||
| // CHECK-NEXT: 101c: cc int3 | ||
| // CHECK-NEXT: 101d: cc int3 | ||
| // CHECK-NEXT: 101e: cc int3 | ||
| // CHECK-NEXT: 101f: cc int3 | ||
| // CHECK-NEXT: 1020: 4c 89 1c 24 movq %r11, (%rsp) | ||
| // CHECK-NEXT: 1024: c3 retq | ||
| // CHECK-NEXT: 1025: cc int3 | ||
| // CHECK-NEXT: 1026: cc int3 | ||
| // CHECK-NEXT: 1027: cc int3 | ||
| // CHECK-NEXT: 1028: cc int3 | ||
| // CHECK-NEXT: 1029: cc int3 | ||
| // CHECK-NEXT: 102a: cc int3 | ||
| // CHECK-NEXT: 102b: cc int3 | ||
| // CHECK-NEXT: 102c: cc int3 | ||
| // CHECK-NEXT: 102d: cc int3 | ||
| // CHECK-NEXT: 102e: cc int3 | ||
| // CHECK-NEXT: 102f: cc int3 | ||
| // CHECK-NEXT: 1030: 4c 8b 1d c1 10 00 00 movq 4289(%rip), %r11 | ||
| // CHECK-NEXT: 1037: e9 d4 ff ff ff jmp -44 <.plt> | ||
| // CHECK-NEXT: 103c: cc int3 | ||
| // CHECK-NEXT: 103d: cc int3 | ||
| // CHECK-NEXT: 103e: cc int3 | ||
| // CHECK-NEXT: 103f: cc int3 | ||
| // CHECK-NEXT: 1040: 4c 8b 1d b9 10 00 00 movq 4281(%rip), %r11 | ||
| // CHECK-NEXT: 1047: e9 c4 ff ff ff jmp -60 <.plt> | ||
| // CHECK-NEXT: 104c: cc int3 | ||
| // CHECK-NEXT: 104d: cc int3 | ||
| // CHECK-NEXT: 104e: cc int3 | ||
| // CHECK-NEXT: 104f: cc int3 | ||
|
|
||
| // CHECK: Contents of section .got.plt: | ||
| // CHECK-NEXT: 20e0 00200000 00000000 00000000 00000000 | ||
| // CHECK-NEXT: 20f0 00000000 00000000 00000000 00000000 | ||
| // CHECK-NEXT: 2100 00000000 00000000 | ||
|
|
||
| .global _start | ||
| _start: | ||
| jmp bar@PLT | ||
| jmp zed@PLT |
| @@ -0,0 +1,66 @@ | ||
| // REQUIRES: x86 | ||
| // RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-linux %s -o %t1.o | ||
| // RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-linux %p/Inputs/shared.s -o %t2.o | ||
| // RUN: ld.lld -shared %t2.o -o %t2.so | ||
|
|
||
| // RUN: ld.lld -shared %t1.o %t2.so -o %t.exe -z retpolineplt | ||
| // RUN: llvm-objdump -d -s %t.exe | FileCheck %s | ||
|
|
||
| // CHECK: Disassembly of section .plt: | ||
| // CHECK-NEXT: .plt: | ||
| // CHECK-NEXT: 1010: ff 35 f2 0f 00 00 pushq 4082(%rip) | ||
| // CHECK-NEXT: 1016: 4c 8b 1d f3 0f 00 00 movq 4083(%rip), %r11 | ||
| // CHECK-NEXT: 101d: e8 0e 00 00 00 callq 14 <.plt+0x20> | ||
| // CHECK-NEXT: 1022: f3 90 pause | ||
| // CHECK-NEXT: 1024: 0f ae e8 lfence | ||
| // CHECK-NEXT: 1027: eb f9 jmp -7 <.plt+0x12> | ||
| // CHECK-NEXT: 1029: cc int3 | ||
| // CHECK-NEXT: 102a: cc int3 | ||
| // CHECK-NEXT: 102b: cc int3 | ||
| // CHECK-NEXT: 102c: cc int3 | ||
| // CHECK-NEXT: 102d: cc int3 | ||
| // CHECK-NEXT: 102e: cc int3 | ||
| // CHECK-NEXT: 102f: cc int3 | ||
| // CHECK-NEXT: 1030: 4c 89 1c 24 movq %r11, (%rsp) | ||
| // CHECK-NEXT: 1034: c3 retq | ||
| // CHECK-NEXT: 1035: cc int3 | ||
| // CHECK-NEXT: 1036: cc int3 | ||
| // CHECK-NEXT: 1037: cc int3 | ||
| // CHECK-NEXT: 1038: cc int3 | ||
| // CHECK-NEXT: 1039: cc int3 | ||
| // CHECK-NEXT: 103a: cc int3 | ||
| // CHECK-NEXT: 103b: cc int3 | ||
| // CHECK-NEXT: 103c: cc int3 | ||
| // CHECK-NEXT: 103d: cc int3 | ||
| // CHECK-NEXT: 103e: cc int3 | ||
| // CHECK-NEXT: 103f: cc int3 | ||
| // CHECK-NEXT: 1040: 4c 8b 1d d1 0f 00 00 movq 4049(%rip), %r11 | ||
| // CHECK-NEXT: 1047: e8 e4 ff ff ff callq -28 <.plt+0x20> | ||
| // CHECK-NEXT: 104c: e9 d1 ff ff ff jmp -47 <.plt+0x12> | ||
| // CHECK-NEXT: 1051: 68 00 00 00 00 pushq $0 | ||
| // CHECK-NEXT: 1056: e9 b5 ff ff ff jmp -75 <.plt> | ||
| // CHECK-NEXT: 105b: cc int3 | ||
| // CHECK-NEXT: 105c: cc int3 | ||
| // CHECK-NEXT: 105d: cc int3 | ||
| // CHECK-NEXT: 105e: cc int3 | ||
| // CHECK-NEXT: 105f: cc int3 | ||
| // CHECK-NEXT: 1060: 4c 8b 1d b9 0f 00 00 movq 4025(%rip), %r11 | ||
| // CHECK-NEXT: 1067: e8 c4 ff ff ff callq -60 <.plt+0x20> | ||
| // CHECK-NEXT: 106c: e9 b1 ff ff ff jmp -79 <.plt+0x12> | ||
| // CHECK-NEXT: 1071: 68 01 00 00 00 pushq $1 | ||
| // CHECK-NEXT: 1076: e9 95 ff ff ff jmp -107 <.plt> | ||
| // CHECK-NEXT: 107b: cc int3 | ||
| // CHECK-NEXT: 107c: cc int3 | ||
| // CHECK-NEXT: 107d: cc int3 | ||
| // CHECK-NEXT: 107e: cc int3 | ||
| // CHECK-NEXT: 107f: cc int3 | ||
|
|
||
| // CHECK: Contents of section .got.plt: | ||
| // CHECK-NEXT: 2000 00300000 00000000 00000000 00000000 | ||
| // CHECK-NEXT: 2010 00000000 00000000 51100000 00000000 | ||
| // CHECK-NEXT: 2020 71100000 00000000 | ||
|
|
||
| .global _start | ||
| _start: | ||
| jmp bar@PLT | ||
| jmp zed@PLT |
| @@ -0,0 +1,221 @@ | ||
//===- IndirectBrExpandPass.cpp - Expand indirectbr to switch ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Implements an expansion pass to turn `indirectbr` instructions in the IR
/// into `switch` instructions. This works by numbering the basic blocks with
/// a dense range of integers, replacing each `blockaddress` constant with the
/// corresponding integer constant, and then building a switch that maps from
/// the integers to the actual blocks. All of the `indirectbr` instructions in
/// the function are redirected to this common switch.
///
/// While this is generically useful if a target is unable to codegen
/// `indirectbr` natively, it is primarily useful when there is some desire to
/// get the built-in non-jump-table lowering of a switch even when the input
/// source contained an explicit indirect branch construct.
///
/// Note that it doesn't make any sense to enable this pass unless a target
/// also disables jump-table lowering of switches. Doing that is likely to
/// pessimize the code.
///
//===----------------------------------------------------------------------===//
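As a rough, hand-written sketch of the rewrite (the names `@f` and `@f.targets` below are illustrative only; the indirectbr-expand test included in this patch shows the pass's real output), a computed goto driven by blockaddress constants is transformed roughly as follows:

```llvm
; Module before the pass: a computed goto whose targets come from
; blockaddress constants.
@f.targets = constant [2 x i8*] [i8* blockaddress(@f, %bb0),
                                 i8* blockaddress(@f, %bb1)]

define void @f(i64 %i) {
entry:
  %slot = getelementptr [2 x i8*], [2 x i8*]* @f.targets, i64 0, i64 %i
  %target = load i8*, i8** %slot
  indirectbr i8* %target, [label %bb0, label %bb1]
bb0:
  ret void
bb1:
  ret void
}

; The same module after the pass (sketch): the blockaddress constants become
; the integers 1 and 2, and the lone indirectbr becomes a switch on the loaded
; value, with the first numbered block as the default case.
@f.targets = constant [2 x i8*] [i8* inttoptr (i64 1 to i8*),
                                 i8* inttoptr (i64 2 to i8*)]

define void @f(i64 %i) {
entry:
  %slot = getelementptr [2 x i8*], [2 x i8*]* @f.targets, i64 0, i64 %i
  %target = load i8*, i8** %slot
  %target.switch_cast = ptrtoint i8* %target to i64
  switch i64 %target.switch_cast, label %bb0 [
    i64 2, label %bb1
  ]
bb0:
  ret void
bb1:
  ret void
}
```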
|
|
||
| #include "llvm/ADT/STLExtras.h" | ||
| #include "llvm/ADT/Sequence.h" | ||
| #include "llvm/ADT/SmallVector.h" | ||
| #include "llvm/CodeGen/TargetPassConfig.h" | ||
| #include "llvm/CodeGen/TargetSubtargetInfo.h" | ||
| #include "llvm/IR/BasicBlock.h" | ||
| #include "llvm/IR/Function.h" | ||
| #include "llvm/IR/IRBuilder.h" | ||
| #include "llvm/IR/InstIterator.h" | ||
| #include "llvm/IR/Instruction.h" | ||
| #include "llvm/IR/Instructions.h" | ||
| #include "llvm/Pass.h" | ||
| #include "llvm/Support/Debug.h" | ||
| #include "llvm/Support/ErrorHandling.h" | ||
| #include "llvm/Support/raw_ostream.h" | ||
| #include "llvm/Target/TargetMachine.h" | ||
|
|
||
| using namespace llvm; | ||
|
|
||
| #define DEBUG_TYPE "indirectbr-expand" | ||
|
|
||
| namespace { | ||
|
|
||
| class IndirectBrExpandPass : public FunctionPass { | ||
| const TargetLowering *TLI = nullptr; | ||
|
|
||
| public: | ||
| static char ID; // Pass identification, replacement for typeid | ||
|
|
||
| IndirectBrExpandPass() : FunctionPass(ID) { | ||
| initializeIndirectBrExpandPassPass(*PassRegistry::getPassRegistry()); | ||
| } | ||
|
|
||
| bool runOnFunction(Function &F) override; | ||
| }; | ||
|
|
||
| } // end anonymous namespace | ||
|
|
||
| char IndirectBrExpandPass::ID = 0; | ||
|
|
||
| INITIALIZE_PASS(IndirectBrExpandPass, DEBUG_TYPE, | ||
| "Expand indirectbr instructions", false, false) | ||
|
|
||
| FunctionPass *llvm::createIndirectBrExpandPass() { | ||
| return new IndirectBrExpandPass(); | ||
| } | ||
|
|
||
| bool IndirectBrExpandPass::runOnFunction(Function &F) { | ||
| auto &DL = F.getParent()->getDataLayout(); | ||
| auto *TPC = getAnalysisIfAvailable<TargetPassConfig>(); | ||
| if (!TPC) | ||
| return false; | ||
|
|
||
| auto &TM = TPC->getTM<TargetMachine>(); | ||
| auto &STI = *TM.getSubtargetImpl(F); | ||
| if (!STI.enableIndirectBrExpand()) | ||
| return false; | ||
| TLI = STI.getTargetLowering(); | ||
|
|
||
| SmallVector<IndirectBrInst *, 1> IndirectBrs; | ||
|
|
||
| // Set of all potential successors for indirectbr instructions. | ||
| SmallPtrSet<BasicBlock *, 4> IndirectBrSuccs; | ||
|
|
||
| // Build a list of indirectbrs that we want to rewrite. | ||
| for (BasicBlock &BB : F) | ||
| if (auto *IBr = dyn_cast<IndirectBrInst>(BB.getTerminator())) { | ||
| // Handle the degenerate case of no successors by replacing the indirectbr | ||
| // with unreachable as there is no successor available. | ||
| if (IBr->getNumSuccessors() == 0) { | ||
| (void)new UnreachableInst(F.getContext(), IBr); | ||
| IBr->eraseFromParent(); | ||
| continue; | ||
| } | ||
|
|
||
| IndirectBrs.push_back(IBr); | ||
| for (BasicBlock *SuccBB : IBr->successors()) | ||
| IndirectBrSuccs.insert(SuccBB); | ||
| } | ||
|
|
||
| if (IndirectBrs.empty()) | ||
| return false; | ||
|
|
||
| // If we need to replace any indirectbrs we need to establish integer | ||
| // constants that will correspond to each of the basic blocks in the function | ||
| // whose address escapes. We do that here and rewrite all the blockaddress | ||
| // constants to just be those integer constants cast to a pointer type. | ||
| SmallVector<BasicBlock *, 4> BBs; | ||
|
|
||
| for (BasicBlock &BB : F) { | ||
| // Skip blocks that aren't successors to an indirectbr we're going to | ||
| // rewrite. | ||
| if (!IndirectBrSuccs.count(&BB)) | ||
| continue; | ||
|
|
||
| auto IsBlockAddressUse = [&](const Use &U) { | ||
| return isa<BlockAddress>(U.getUser()); | ||
| }; | ||
| auto BlockAddressUseIt = llvm::find_if(BB.uses(), IsBlockAddressUse); | ||
| if (BlockAddressUseIt == BB.use_end()) | ||
| continue; | ||
|
|
||
| assert(std::find_if(std::next(BlockAddressUseIt), BB.use_end(), | ||
| IsBlockAddressUse) == BB.use_end() && | ||
| "There should only ever be a single blockaddress use because it is " | ||
| "a constant and should be uniqued."); | ||
|
|
||
| auto *BA = cast<BlockAddress>(BlockAddressUseIt->getUser()); | ||
|
|
||
| // Skip if the constant was formed but ended up not being used (due to DCE | ||
| // or whatever). | ||
| if (!BA->isConstantUsed()) | ||
| continue; | ||
|
|
||
| // Compute the index we want to use for this basic block. We can't use zero | ||
| // because null can be compared with block addresses. | ||
| int BBIndex = BBs.size() + 1; | ||
| BBs.push_back(&BB); | ||
|
|
||
| auto *ITy = cast<IntegerType>(DL.getIntPtrType(BA->getType())); | ||
| ConstantInt *BBIndexC = ConstantInt::get(ITy, BBIndex); | ||
|
|
||
| // Now rewrite the blockaddress to an integer constant based on the index. | ||
| // FIXME: We could potentially preserve the uses as arguments to inline asm. | ||
| // This would allow some uses such as diagnostic information in crashes to | ||
| // have higher quality even when this transform is enabled, but would break | ||
| // users that round-trip blockaddresses through inline assembly and then | ||
| // back into an indirectbr. | ||
| BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(BBIndexC, BA->getType())); | ||
| } | ||
|
|
||
| if (BBs.empty()) { | ||
| // There are no blocks whose address is taken, so any indirectbr instruction | ||
| // cannot get a valid input and we can replace all of them with unreachable. | ||
| for (auto *IBr : IndirectBrs) { | ||
| (void)new UnreachableInst(F.getContext(), IBr); | ||
| IBr->eraseFromParent(); | ||
| } | ||
| return true; | ||
| } | ||
|
|
||
| BasicBlock *SwitchBB; | ||
| Value *SwitchValue; | ||
|
|
||
| // Compute a common integer type across all the indirectbr instructions. | ||
| IntegerType *CommonITy = nullptr; | ||
| for (auto *IBr : IndirectBrs) { | ||
| auto *ITy = | ||
| cast<IntegerType>(DL.getIntPtrType(IBr->getAddress()->getType())); | ||
| if (!CommonITy || ITy->getBitWidth() > CommonITy->getBitWidth()) | ||
| CommonITy = ITy; | ||
| } | ||
|
|
||
| auto GetSwitchValue = [DL, CommonITy](IndirectBrInst *IBr) { | ||
| return CastInst::CreatePointerCast( | ||
| IBr->getAddress(), CommonITy, | ||
| Twine(IBr->getAddress()->getName()) + ".switch_cast", IBr); | ||
| }; | ||
|
|
||
| if (IndirectBrs.size() == 1) { | ||
| // If we only have one indirectbr, we can just directly replace it within | ||
| // its block. | ||
| SwitchBB = IndirectBrs[0]->getParent(); | ||
| SwitchValue = GetSwitchValue(IndirectBrs[0]); | ||
| IndirectBrs[0]->eraseFromParent(); | ||
| } else { | ||
| // Otherwise we need to create a new block to hold the switch across BBs, | ||
| // jump to that block instead of each indirectbr, and phi together the | ||
| // values for the switch. | ||
| SwitchBB = BasicBlock::Create(F.getContext(), "switch_bb", &F); | ||
| auto *SwitchPN = PHINode::Create(CommonITy, IndirectBrs.size(), | ||
| "switch_value_phi", SwitchBB); | ||
| SwitchValue = SwitchPN; | ||
|
|
||
| // Now replace the indirectbr instructions with direct branches to the | ||
| // switch block and fill out the PHI operands. | ||
| for (auto *IBr : IndirectBrs) { | ||
| SwitchPN->addIncoming(GetSwitchValue(IBr), IBr->getParent()); | ||
| BranchInst::Create(SwitchBB, IBr); | ||
| IBr->eraseFromParent(); | ||
| } | ||
| } | ||
|
|
||
  // Now build the switch in the block. At this point the block does not yet
  // have a terminator.
| auto *SI = SwitchInst::Create(SwitchValue, BBs[0], BBs.size(), SwitchBB); | ||
|
|
||
| // Add a case for each block. | ||
| for (int i : llvm::seq<int>(1, BBs.size())) | ||
| SI->addCase(ConstantInt::get(CommonITy, i + 1), BBs[i]); | ||
|
|
||
| return true; | ||
| } |
| @@ -0,0 +1,276 @@ | ||
//===--- X86RetpolineThunks.cpp - Construct retpoline thunks for x86 -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Pass that injects an MI thunk implementing a "retpoline". This is
/// a RET-implemented trampoline that is used to lower indirect calls in a way
/// that prevents speculation on some x86 processors and can be used to
/// mitigate security vulnerabilities due to targeted speculative execution
/// and side channels such as CVE-2017-5715.
///
/// TODO(chandlerc): All of this code could use better comments and
/// documentation.
///
//===----------------------------------------------------------------------===//
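As a hedged sketch of how the thunks emitted by this pass are consumed (the function name below is illustrative; the codegen is based on the retpoline.ll test in this patch), IR like the following, built with the retpoline target feature, has its indirect call rewritten to go through the thunk:

```llvm
; An indirect call in a function built with the retpoline feature.
define void @call_fp(void ()* %fp) #0 {
entry:
  call void %fp()
  ret void
}

attributes #0 = { "target-features"="+retpoline" }

; On x86-64 this is emitted roughly as (see the retpoline.ll test):
;   movq  %rdi, %r11
;   callq __llvm_retpoline_r11
; The attacker-controllable indirect branch is thus replaced by the thunk's
; retq, and any mispredicted speculation of that return is trapped in the
; thunk's pause/lfence capture loop.
```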
|
|
||
| #include "X86.h" | ||
| #include "X86InstrBuilder.h" | ||
| #include "X86Subtarget.h" | ||
| #include "llvm/CodeGen/MachineFunction.h" | ||
| #include "llvm/CodeGen/MachineInstrBuilder.h" | ||
| #include "llvm/CodeGen/MachineModuleInfo.h" | ||
| #include "llvm/CodeGen/Passes.h" | ||
| #include "llvm/CodeGen/TargetPassConfig.h" | ||
| #include "llvm/IR/IRBuilder.h" | ||
| #include "llvm/IR/Instructions.h" | ||
| #include "llvm/IR/Module.h" | ||
| #include "llvm/Support/CommandLine.h" | ||
| #include "llvm/Support/Debug.h" | ||
| #include "llvm/Support/raw_ostream.h" | ||
|
|
||
| using namespace llvm; | ||
|
|
||
| #define DEBUG_TYPE "x86-retpoline-thunks" | ||
|
|
||
| namespace { | ||
| class X86RetpolineThunks : public ModulePass { | ||
| public: | ||
| static char ID; | ||
|
|
||
| X86RetpolineThunks() : ModulePass(ID) {} | ||
|
|
||
| StringRef getPassName() const override { return "X86 Retpoline Thunks"; } | ||
|
|
||
| bool runOnModule(Module &M) override; | ||
|
|
||
| void getAnalysisUsage(AnalysisUsage &AU) const override { | ||
| AU.addRequired<MachineModuleInfo>(); | ||
| AU.addPreserved<MachineModuleInfo>(); | ||
| } | ||
|
|
||
| private: | ||
| MachineModuleInfo *MMI; | ||
| const TargetMachine *TM; | ||
| bool Is64Bit; | ||
| const X86Subtarget *STI; | ||
| const X86InstrInfo *TII; | ||
|
|
||
| Function *createThunkFunction(Module &M, StringRef Name); | ||
| void insertRegReturnAddrClobber(MachineBasicBlock &MBB, unsigned Reg); | ||
| void insert32BitPushReturnAddrClobber(MachineBasicBlock &MBB); | ||
| void createThunk(Module &M, StringRef NameSuffix, | ||
| Optional<unsigned> Reg = None); | ||
| }; | ||
|
|
||
| } // end anonymous namespace | ||
|
|
||
| ModulePass *llvm::createX86RetpolineThunksPass() { | ||
| return new X86RetpolineThunks(); | ||
| } | ||
|
|
||
| char X86RetpolineThunks::ID = 0; | ||
|
|
||
| bool X86RetpolineThunks::runOnModule(Module &M) { | ||
| DEBUG(dbgs() << getPassName() << '\n'); | ||
|
|
||
| auto *TPC = getAnalysisIfAvailable<TargetPassConfig>(); | ||
| assert(TPC && "X86-specific target pass should not be run without a target " | ||
| "pass config!"); | ||
|
|
||
| MMI = &getAnalysis<MachineModuleInfo>(); | ||
| TM = &TPC->getTM<TargetMachine>(); | ||
| Is64Bit = TM->getTargetTriple().getArch() == Triple::x86_64; | ||
|
|
||
| // Only add a thunk if we have at least one function that has the retpoline | ||
| // feature enabled in its subtarget. | ||
| // FIXME: Conditionalize on indirect calls so we don't emit a thunk when | ||
| // nothing will end up calling it. | ||
| // FIXME: It's a little silly to look at every function just to enumerate | ||
| // the subtargets, but eventually we'll want to look at them for indirect | ||
| // calls, so maybe this is OK. | ||
| if (!llvm::any_of(M, [&](const Function &F) { | ||
| // Save the subtarget we find for use in emitting the subsequent | ||
| // thunk. | ||
| STI = &TM->getSubtarget<X86Subtarget>(F); | ||
| return STI->useRetpoline() && !STI->useRetpolineExternalThunk(); | ||
| })) | ||
| return false; | ||
|
|
||
| // If we have a relevant subtarget, get the instr info as well. | ||
| TII = STI->getInstrInfo(); | ||
|
|
||
| if (Is64Bit) { | ||
| // __llvm_retpoline_r11: | ||
| // callq .Lr11_call_target | ||
| // .Lr11_capture_spec: | ||
| // pause | ||
| // lfence | ||
| // jmp .Lr11_capture_spec | ||
| // .align 16 | ||
| // .Lr11_call_target: | ||
| // movq %r11, (%rsp) | ||
| // retq | ||
|
|
||
| createThunk(M, "r11", X86::R11); | ||
| } else { | ||
| // For 32-bit targets we need to emit a collection of thunks for various | ||
| // possible scratch registers as well as a fallback that is used when | ||
| // there are no scratch registers and assumes the retpoline target has | ||
| // been pushed. | ||
| // __llvm_retpoline_eax: | ||
| // calll .Leax_call_target | ||
| // .Leax_capture_spec: | ||
    //   pause
    //   lfence
    //   jmp .Leax_capture_spec
| // .align 16 | ||
| // .Leax_call_target: | ||
| // movl %eax, (%esp) # Clobber return addr | ||
| // retl | ||
| // | ||
| // __llvm_retpoline_ecx: | ||
| // ... # Same setup | ||
| // movl %ecx, (%esp) | ||
| // retl | ||
| // | ||
| // __llvm_retpoline_edx: | ||
| // ... # Same setup | ||
| // movl %edx, (%esp) | ||
| // retl | ||
| // | ||
| // This last one is a bit more special and so needs a little extra | ||
| // handling. | ||
| // __llvm_retpoline_push: | ||
| // calll .Lpush_call_target | ||
| // .Lpush_capture_spec: | ||
| // pause | ||
| // lfence | ||
| // jmp .Lpush_capture_spec | ||
| // .align 16 | ||
| // .Lpush_call_target: | ||
    //   # Clear the capture_spec return address.
| // addl $4, %esp | ||
| // # Top of stack words are: Callee, RA. Exchange Callee and RA. | ||
| // pushl 4(%esp) # Push callee | ||
| // pushl 4(%esp) # Push RA | ||
| // popl 8(%esp) # Pop RA to final RA | ||
| // popl (%esp) # Pop callee to next top of stack | ||
| // retl # Ret to callee | ||
| createThunk(M, "eax", X86::EAX); | ||
| createThunk(M, "ecx", X86::ECX); | ||
| createThunk(M, "edx", X86::EDX); | ||
| createThunk(M, "push"); | ||
| } | ||
|
|
||
| return true; | ||
| } | ||
|
|
||
| Function *X86RetpolineThunks::createThunkFunction(Module &M, StringRef Name) { | ||
| LLVMContext &Ctx = M.getContext(); | ||
| auto Type = FunctionType::get(Type::getVoidTy(Ctx), false); | ||
| Function *F = | ||
| Function::Create(Type, GlobalValue::LinkOnceODRLinkage, Name, &M); | ||
| F->setVisibility(GlobalValue::HiddenVisibility); | ||
| F->setComdat(M.getOrInsertComdat(Name)); | ||
|
|
||
| // Add Attributes so that we don't create a frame, unwind information, or | ||
| // inline. | ||
| AttrBuilder B; | ||
| B.addAttribute(llvm::Attribute::NoUnwind); | ||
| B.addAttribute(llvm::Attribute::Naked); | ||
| F->addAttributes(llvm::AttributeList::FunctionIndex, B); | ||
|
|
||
| // Populate our function a bit so that we can verify. | ||
| BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", F); | ||
| IRBuilder<> Builder(Entry); | ||
|
|
||
| Builder.CreateRetVoid(); | ||
| return F; | ||
| } | ||
|
|
||
| void X86RetpolineThunks::insertRegReturnAddrClobber(MachineBasicBlock &MBB, | ||
| unsigned Reg) { | ||
| const unsigned MovOpc = Is64Bit ? X86::MOV64mr : X86::MOV32mr; | ||
| const unsigned SPReg = Is64Bit ? X86::RSP : X86::ESP; | ||
| addRegOffset(BuildMI(&MBB, DebugLoc(), TII->get(MovOpc)), SPReg, false, 0) | ||
| .addReg(Reg); | ||
| } | ||
| void X86RetpolineThunks::insert32BitPushReturnAddrClobber( | ||
| MachineBasicBlock &MBB) { | ||
| // The instruction sequence we use to replace the return address without | ||
| // a scratch register is somewhat complicated: | ||
| // # Clear capture_spec from return address. | ||
| // addl $4, %esp | ||
| // # Top of stack words are: Callee, RA. Exchange Callee and RA. | ||
| // pushl 4(%esp) # Push callee | ||
| // pushl 4(%esp) # Push RA | ||
| // popl 8(%esp) # Pop RA to final RA | ||
| // popl (%esp) # Pop callee to next top of stack | ||
| // retl # Ret to callee | ||
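  //
  // As a worked illustration (not part of the original comment), write the
  // stack top-first. On entry to the call target it is [CS][RA][Callee]:
  // CS is the return address pushed by the thunk's own calll (it points at
  // the capture loop), RA is the original return address, and Callee is the
  // retpoline target that the call site pushed before calling the thunk.
  //   addl $4, %esp    -> [RA][Callee]
  //   pushl 4(%esp)    -> [Callee][RA][Callee]
  //   pushl 4(%esp)    -> [RA][Callee][RA][Callee]
  //   popl 8(%esp)     -> [Callee][RA][RA]
  //   popl (%esp)      -> [Callee][RA]
  //   retl             -> pops Callee into EIP; the callee sees RA on top.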
| BuildMI(&MBB, DebugLoc(), TII->get(X86::ADD32ri), X86::ESP) | ||
| .addReg(X86::ESP) | ||
| .addImm(4); | ||
| addRegOffset(BuildMI(&MBB, DebugLoc(), TII->get(X86::PUSH32rmm)), X86::ESP, | ||
| false, 4); | ||
| addRegOffset(BuildMI(&MBB, DebugLoc(), TII->get(X86::PUSH32rmm)), X86::ESP, | ||
| false, 4); | ||
| addRegOffset(BuildMI(&MBB, DebugLoc(), TII->get(X86::POP32rmm)), X86::ESP, | ||
| false, 8); | ||
| addRegOffset(BuildMI(&MBB, DebugLoc(), TII->get(X86::POP32rmm)), X86::ESP, | ||
| false, 0); | ||
| } | ||
|
|
||
| void X86RetpolineThunks::createThunk(Module &M, StringRef NameSuffix, | ||
| Optional<unsigned> Reg) { | ||
| Function &F = | ||
| *createThunkFunction(M, (Twine("__llvm_retpoline_") + NameSuffix).str()); | ||
| MachineFunction &MF = MMI->getOrCreateMachineFunction(F); | ||
|
|
||
| // Set MF properties. We never use vregs... | ||
| MF.getProperties().set(MachineFunctionProperties::Property::NoVRegs); | ||
|
|
||
| BasicBlock &OrigEntryBB = F.getEntryBlock(); | ||
| MachineBasicBlock *Entry = MF.CreateMachineBasicBlock(&OrigEntryBB); | ||
| MachineBasicBlock *CaptureSpec = MF.CreateMachineBasicBlock(&OrigEntryBB); | ||
| MachineBasicBlock *CallTarget = MF.CreateMachineBasicBlock(&OrigEntryBB); | ||
|
|
||
| MF.push_back(Entry); | ||
| MF.push_back(CaptureSpec); | ||
| MF.push_back(CallTarget); | ||
|
|
||
| const unsigned CallOpc = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32; | ||
| const unsigned RetOpc = Is64Bit ? X86::RETQ : X86::RETL; | ||
|
|
||
| BuildMI(Entry, DebugLoc(), TII->get(CallOpc)).addMBB(CallTarget); | ||
| Entry->addSuccessor(CallTarget); | ||
| Entry->addSuccessor(CaptureSpec); | ||
| CallTarget->setHasAddressTaken(); | ||
|
|
||
| // In the capture loop for speculation, we want to stop the processor from | ||
| // speculating as fast as possible. On Intel processors, the PAUSE instruction | ||
| // will block speculation without consuming any execution resources. On AMD | ||
| // processors, the PAUSE instruction is (essentially) a nop, so we also use an | ||
| // LFENCE instruction which they have advised will stop speculation as well | ||
  // with minimal resource utilization. We still end the capture with a jump to
  // form an infinite loop, to fully guarantee that no matter what
  // implementation of the x86 ISA is used, speculation down this code path
  // never escapes.
| BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::PAUSE)); | ||
| BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::LFENCE)); | ||
| BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::JMP_1)).addMBB(CaptureSpec); | ||
| CaptureSpec->setHasAddressTaken(); | ||
| CaptureSpec->addSuccessor(CaptureSpec); | ||
|
|
||
| CallTarget->setAlignment(4); | ||
| if (Reg) { | ||
| insertRegReturnAddrClobber(*CallTarget, *Reg); | ||
| } else { | ||
| assert(!Is64Bit && "We only support non-reg thunks on 32-bit x86!"); | ||
| insert32BitPushReturnAddrClobber(*CallTarget); | ||
| } | ||
| BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc)); | ||
| } |
| @@ -0,0 +1,166 @@ | ||
| ; RUN: llc -mtriple=x86_64-unknown < %s | FileCheck %s --implicit-check-not="jmp.*\*" --implicit-check-not="call.*\*" --check-prefix=X64 | ||
| ; RUN: llc -mtriple=x86_64-unknown -O0 < %s | FileCheck %s --implicit-check-not="jmp.*\*" --implicit-check-not="call.*\*" --check-prefix=X64FAST | ||
|
|
||
| ; RUN: llc -mtriple=i686-unknown < %s | FileCheck %s --implicit-check-not="jmp.*\*" --implicit-check-not="call.*\*" --check-prefix=X86 | ||
| ; RUN: llc -mtriple=i686-unknown -O0 < %s | FileCheck %s --implicit-check-not="jmp.*\*" --implicit-check-not="call.*\*" --check-prefix=X86FAST | ||
|
|
||
| declare void @bar(i32) | ||
|
|
||
| ; Test a simple indirect call and tail call. | ||
| define void @icall_reg(void (i32)* %fp, i32 %x) #0 { | ||
| entry: | ||
| tail call void @bar(i32 %x) | ||
| tail call void %fp(i32 %x) | ||
| tail call void @bar(i32 %x) | ||
| tail call void %fp(i32 %x) | ||
| ret void | ||
| } | ||
|
|
||
| ; X64-LABEL: icall_reg: | ||
| ; X64-DAG: movq %rdi, %[[fp:[^ ]*]] | ||
| ; X64-DAG: movl %esi, %[[x:[^ ]*]] | ||
| ; X64: movl %[[x]], %edi | ||
| ; X64: callq bar | ||
| ; X64-DAG: movl %[[x]], %edi | ||
| ; X64-DAG: movq %[[fp]], %r11 | ||
| ; X64: callq __llvm_external_retpoline_r11 | ||
| ; X64: movl %[[x]], %edi | ||
| ; X64: callq bar | ||
| ; X64-DAG: movl %[[x]], %edi | ||
| ; X64-DAG: movq %[[fp]], %r11 | ||
| ; X64: jmp __llvm_external_retpoline_r11 # TAILCALL | ||
|
|
||
| ; X64FAST-LABEL: icall_reg: | ||
| ; X64FAST: callq bar | ||
| ; X64FAST: callq __llvm_external_retpoline_r11 | ||
| ; X64FAST: callq bar | ||
| ; X64FAST: jmp __llvm_external_retpoline_r11 # TAILCALL | ||
|
|
||
| ; X86-LABEL: icall_reg: | ||
| ; X86-DAG: movl 12(%esp), %[[fp:[^ ]*]] | ||
| ; X86-DAG: movl 16(%esp), %[[x:[^ ]*]] | ||
| ; X86: pushl %[[x]] | ||
| ; X86: calll bar | ||
| ; X86: movl %[[fp]], %eax | ||
| ; X86: pushl %[[x]] | ||
| ; X86: calll __llvm_external_retpoline_eax | ||
| ; X86: pushl %[[x]] | ||
| ; X86: calll bar | ||
| ; X86: movl %[[fp]], %eax | ||
| ; X86: pushl %[[x]] | ||
| ; X86: calll __llvm_external_retpoline_eax | ||
| ; X86-NOT: # TAILCALL | ||
|
|
||
| ; X86FAST-LABEL: icall_reg: | ||
| ; X86FAST: calll bar | ||
| ; X86FAST: calll __llvm_external_retpoline_eax | ||
| ; X86FAST: calll bar | ||
| ; X86FAST: calll __llvm_external_retpoline_eax | ||
|
|
||
|
|
||
| @global_fp = external global void (i32)* | ||
|
|
||
| ; Test an indirect call through a global variable. | ||
| define void @icall_global_fp(i32 %x, void (i32)** %fpp) #0 { | ||
| %fp1 = load void (i32)*, void (i32)** @global_fp | ||
| call void %fp1(i32 %x) | ||
| %fp2 = load void (i32)*, void (i32)** @global_fp | ||
| tail call void %fp2(i32 %x) | ||
| ret void | ||
| } | ||
|
|
||
| ; X64-LABEL: icall_global_fp: | ||
| ; X64-DAG: movl %edi, %[[x:[^ ]*]] | ||
| ; X64-DAG: movq global_fp(%rip), %r11 | ||
| ; X64: callq __llvm_external_retpoline_r11 | ||
| ; X64-DAG: movl %[[x]], %edi | ||
| ; X64-DAG: movq global_fp(%rip), %r11 | ||
| ; X64: jmp __llvm_external_retpoline_r11 # TAILCALL | ||
|
|
||
| ; X64FAST-LABEL: icall_global_fp: | ||
| ; X64FAST: movq global_fp(%rip), %r11 | ||
| ; X64FAST: callq __llvm_external_retpoline_r11 | ||
| ; X64FAST: movq global_fp(%rip), %r11 | ||
| ; X64FAST: jmp __llvm_external_retpoline_r11 # TAILCALL | ||
|
|
||
| ; X86-LABEL: icall_global_fp: | ||
| ; X86: movl global_fp, %eax | ||
| ; X86: pushl 4(%esp) | ||
| ; X86: calll __llvm_external_retpoline_eax | ||
| ; X86: addl $4, %esp | ||
| ; X86: movl global_fp, %eax | ||
| ; X86: jmp __llvm_external_retpoline_eax # TAILCALL | ||
|
|
||
| ; X86FAST-LABEL: icall_global_fp: | ||
| ; X86FAST: calll __llvm_external_retpoline_eax | ||
| ; X86FAST: jmp __llvm_external_retpoline_eax # TAILCALL | ||
|
|
||
|
|
||
| %struct.Foo = type { void (%struct.Foo*)** } | ||
|
|
||
| ; Test an indirect call through a vtable. | ||
| define void @vcall(%struct.Foo* %obj) #0 { | ||
| %vptr_field = getelementptr %struct.Foo, %struct.Foo* %obj, i32 0, i32 0 | ||
| %vptr = load void (%struct.Foo*)**, void (%struct.Foo*)*** %vptr_field | ||
| %vslot = getelementptr void(%struct.Foo*)*, void(%struct.Foo*)** %vptr, i32 1 | ||
| %fp = load void(%struct.Foo*)*, void(%struct.Foo*)** %vslot | ||
| tail call void %fp(%struct.Foo* %obj) | ||
| tail call void %fp(%struct.Foo* %obj) | ||
| ret void | ||
| } | ||
|
|
||
| ; X64-LABEL: vcall: | ||
| ; X64: movq %rdi, %[[obj:[^ ]*]] | ||
| ; X64: movq (%[[obj]]), %[[vptr:[^ ]*]] | ||
| ; X64: movq 8(%[[vptr]]), %[[fp:[^ ]*]] | ||
| ; X64: movq %[[fp]], %r11 | ||
| ; X64: callq __llvm_external_retpoline_r11 | ||
| ; X64-DAG: movq %[[obj]], %rdi | ||
| ; X64-DAG: movq %[[fp]], %r11 | ||
| ; X64: jmp __llvm_external_retpoline_r11 # TAILCALL | ||
|
|
||
| ; X64FAST-LABEL: vcall: | ||
| ; X64FAST: callq __llvm_external_retpoline_r11 | ||
| ; X64FAST: jmp __llvm_external_retpoline_r11 # TAILCALL | ||
|
|
||
| ; X86-LABEL: vcall: | ||
| ; X86: movl 8(%esp), %[[obj:[^ ]*]] | ||
| ; X86: movl (%[[obj]]), %[[vptr:[^ ]*]] | ||
| ; X86: movl 4(%[[vptr]]), %[[fp:[^ ]*]] | ||
| ; X86: movl %[[fp]], %eax | ||
| ; X86: pushl %[[obj]] | ||
| ; X86: calll __llvm_external_retpoline_eax | ||
| ; X86: addl $4, %esp | ||
| ; X86: movl %[[fp]], %eax | ||
| ; X86: jmp __llvm_external_retpoline_eax # TAILCALL | ||
|
|
||
| ; X86FAST-LABEL: vcall: | ||
| ; X86FAST: calll __llvm_external_retpoline_eax | ||
| ; X86FAST: jmp __llvm_external_retpoline_eax # TAILCALL | ||
|
|
||
|
|
||
| declare void @direct_callee() | ||
|
|
||
| define void @direct_tail() #0 { | ||
| tail call void @direct_callee() | ||
| ret void | ||
| } | ||
|
|
||
| ; X64-LABEL: direct_tail: | ||
| ; X64: jmp direct_callee # TAILCALL | ||
| ; X64FAST-LABEL: direct_tail: | ||
| ; X64FAST: jmp direct_callee # TAILCALL | ||
| ; X86-LABEL: direct_tail: | ||
| ; X86: jmp direct_callee # TAILCALL | ||
| ; X86FAST-LABEL: direct_tail: | ||
| ; X86FAST: jmp direct_callee # TAILCALL | ||
|
|
||
|
|
||
| ; Lastly check that no thunks were emitted. | ||
| ; X64-NOT: __{{.*}}_retpoline_{{.*}}: | ||
| ; X64FAST-NOT: __{{.*}}_retpoline_{{.*}}: | ||
| ; X86-NOT: __{{.*}}_retpoline_{{.*}}: | ||
| ; X86FAST-NOT: __{{.*}}_retpoline_{{.*}}: | ||
|
|
||
|
|
||
| attributes #0 = { "target-features"="+retpoline-external-thunk" } |
| @@ -0,0 +1,367 @@ | ||
| ; RUN: llc -mtriple=x86_64-unknown < %s | FileCheck %s --implicit-check-not="jmp.*\*" --implicit-check-not="call.*\*" --check-prefix=X64 | ||
| ; RUN: llc -mtriple=x86_64-unknown -O0 < %s | FileCheck %s --implicit-check-not="jmp.*\*" --implicit-check-not="call.*\*" --check-prefix=X64FAST | ||
|
|
||
| ; RUN: llc -mtriple=i686-unknown < %s | FileCheck %s --implicit-check-not="jmp.*\*" --implicit-check-not="call.*\*" --check-prefix=X86 | ||
| ; RUN: llc -mtriple=i686-unknown -O0 < %s | FileCheck %s --implicit-check-not="jmp.*\*" --implicit-check-not="call.*\*" --check-prefix=X86FAST | ||
|
|
||
| declare void @bar(i32) | ||
|
|
||
| ; Test a simple indirect call and tail call. | ||
| define void @icall_reg(void (i32)* %fp, i32 %x) #0 { | ||
| entry: | ||
| tail call void @bar(i32 %x) | ||
| tail call void %fp(i32 %x) | ||
| tail call void @bar(i32 %x) | ||
| tail call void %fp(i32 %x) | ||
| ret void | ||
| } | ||
|
|
||
| ; X64-LABEL: icall_reg: | ||
| ; X64-DAG: movq %rdi, %[[fp:[^ ]*]] | ||
| ; X64-DAG: movl %esi, %[[x:[^ ]*]] | ||
| ; X64: movl %[[x]], %edi | ||
| ; X64: callq bar | ||
| ; X64-DAG: movl %[[x]], %edi | ||
| ; X64-DAG: movq %[[fp]], %r11 | ||
| ; X64: callq __llvm_retpoline_r11 | ||
| ; X64: movl %[[x]], %edi | ||
| ; X64: callq bar | ||
| ; X64-DAG: movl %[[x]], %edi | ||
| ; X64-DAG: movq %[[fp]], %r11 | ||
| ; X64: jmp __llvm_retpoline_r11 # TAILCALL | ||
|
|
||
| ; X64FAST-LABEL: icall_reg: | ||
| ; X64FAST: callq bar | ||
| ; X64FAST: callq __llvm_retpoline_r11 | ||
| ; X64FAST: callq bar | ||
| ; X64FAST: jmp __llvm_retpoline_r11 # TAILCALL | ||
|
|
||
| ; X86-LABEL: icall_reg: | ||
| ; X86-DAG: movl 12(%esp), %[[fp:[^ ]*]] | ||
| ; X86-DAG: movl 16(%esp), %[[x:[^ ]*]] | ||
| ; X86: pushl %[[x]] | ||
| ; X86: calll bar | ||
| ; X86: movl %[[fp]], %eax | ||
| ; X86: pushl %[[x]] | ||
| ; X86: calll __llvm_retpoline_eax | ||
| ; X86: pushl %[[x]] | ||
| ; X86: calll bar | ||
| ; X86: movl %[[fp]], %eax | ||
| ; X86: pushl %[[x]] | ||
| ; X86: calll __llvm_retpoline_eax | ||
| ; X86-NOT: # TAILCALL | ||
|
|
||
| ; X86FAST-LABEL: icall_reg: | ||
| ; X86FAST: calll bar | ||
| ; X86FAST: calll __llvm_retpoline_eax | ||
| ; X86FAST: calll bar | ||
| ; X86FAST: calll __llvm_retpoline_eax | ||
|
|
||
|
|
||
| @global_fp = external global void (i32)* | ||
|
|
||
| ; Test an indirect call through a global variable. | ||
| define void @icall_global_fp(i32 %x, void (i32)** %fpp) #0 { | ||
| %fp1 = load void (i32)*, void (i32)** @global_fp | ||
| call void %fp1(i32 %x) | ||
| %fp2 = load void (i32)*, void (i32)** @global_fp | ||
| tail call void %fp2(i32 %x) | ||
| ret void | ||
| } | ||
|
|
||
| ; X64-LABEL: icall_global_fp: | ||
| ; X64-DAG: movl %edi, %[[x:[^ ]*]] | ||
| ; X64-DAG: movq global_fp(%rip), %r11 | ||
| ; X64: callq __llvm_retpoline_r11 | ||
| ; X64-DAG: movl %[[x]], %edi | ||
| ; X64-DAG: movq global_fp(%rip), %r11 | ||
| ; X64: jmp __llvm_retpoline_r11 # TAILCALL | ||
|
|
||
| ; X64FAST-LABEL: icall_global_fp: | ||
| ; X64FAST: movq global_fp(%rip), %r11 | ||
| ; X64FAST: callq __llvm_retpoline_r11 | ||
| ; X64FAST: movq global_fp(%rip), %r11 | ||
| ; X64FAST: jmp __llvm_retpoline_r11 # TAILCALL | ||
|
|
||
| ; X86-LABEL: icall_global_fp: | ||
| ; X86: movl global_fp, %eax | ||
| ; X86: pushl 4(%esp) | ||
| ; X86: calll __llvm_retpoline_eax | ||
| ; X86: addl $4, %esp | ||
| ; X86: movl global_fp, %eax | ||
| ; X86: jmp __llvm_retpoline_eax # TAILCALL | ||
|
|
||
| ; X86FAST-LABEL: icall_global_fp: | ||
| ; X86FAST: calll __llvm_retpoline_eax | ||
| ; X86FAST: jmp __llvm_retpoline_eax # TAILCALL | ||
|
|
||
|
|
||
| %struct.Foo = type { void (%struct.Foo*)** } | ||
|
|
||
| ; Test an indirect call through a vtable. | ||
| define void @vcall(%struct.Foo* %obj) #0 { | ||
| %vptr_field = getelementptr %struct.Foo, %struct.Foo* %obj, i32 0, i32 0 | ||
| %vptr = load void (%struct.Foo*)**, void (%struct.Foo*)*** %vptr_field | ||
| %vslot = getelementptr void(%struct.Foo*)*, void(%struct.Foo*)** %vptr, i32 1 | ||
| %fp = load void(%struct.Foo*)*, void(%struct.Foo*)** %vslot | ||
| tail call void %fp(%struct.Foo* %obj) | ||
| tail call void %fp(%struct.Foo* %obj) | ||
| ret void | ||
| } | ||
|
|
||
| ; X64-LABEL: vcall: | ||
| ; X64: movq %rdi, %[[obj:[^ ]*]] | ||
| ; X64: movq (%[[obj]]), %[[vptr:[^ ]*]] | ||
| ; X64: movq 8(%[[vptr]]), %[[fp:[^ ]*]] | ||
| ; X64: movq %[[fp]], %r11 | ||
| ; X64: callq __llvm_retpoline_r11 | ||
| ; X64-DAG: movq %[[obj]], %rdi | ||
| ; X64-DAG: movq %[[fp]], %r11 | ||
| ; X64: jmp __llvm_retpoline_r11 # TAILCALL | ||
|
|
||
| ; X64FAST-LABEL: vcall: | ||
| ; X64FAST: callq __llvm_retpoline_r11 | ||
| ; X64FAST: jmp __llvm_retpoline_r11 # TAILCALL | ||
|
|
||
| ; X86-LABEL: vcall: | ||
| ; X86: movl 8(%esp), %[[obj:[^ ]*]] | ||
| ; X86: movl (%[[obj]]), %[[vptr:[^ ]*]] | ||
| ; X86: movl 4(%[[vptr]]), %[[fp:[^ ]*]] | ||
| ; X86: movl %[[fp]], %eax | ||
| ; X86: pushl %[[obj]] | ||
| ; X86: calll __llvm_retpoline_eax | ||
| ; X86: addl $4, %esp | ||
| ; X86: movl %[[fp]], %eax | ||
| ; X86: jmp __llvm_retpoline_eax # TAILCALL | ||
|
|
||
| ; X86FAST-LABEL: vcall: | ||
| ; X86FAST: calll __llvm_retpoline_eax | ||
| ; X86FAST: jmp __llvm_retpoline_eax # TAILCALL | ||
|
|
||
|
|
||
| declare void @direct_callee() | ||
|
|
||
| define void @direct_tail() #0 { | ||
| tail call void @direct_callee() | ||
| ret void | ||
| } | ||
|
|
||
| ; X64-LABEL: direct_tail: | ||
| ; X64: jmp direct_callee # TAILCALL | ||
| ; X64FAST-LABEL: direct_tail: | ||
| ; X64FAST: jmp direct_callee # TAILCALL | ||
| ; X86-LABEL: direct_tail: | ||
| ; X86: jmp direct_callee # TAILCALL | ||
| ; X86FAST-LABEL: direct_tail: | ||
| ; X86FAST: jmp direct_callee # TAILCALL | ||
|
|
||
|
|
||
| declare void @nonlazybind_callee() #1 | ||
|
|
||
| define void @nonlazybind_caller() #0 { | ||
| call void @nonlazybind_callee() | ||
| tail call void @nonlazybind_callee() | ||
| ret void | ||
| } | ||
|
|
||
| ; X64-LABEL: nonlazybind_caller: | ||
| ; X64: movq nonlazybind_callee@GOTPCREL(%rip), %[[REG:.*]] | ||
| ; X64: movq %[[REG]], %r11 | ||
| ; X64: callq __llvm_retpoline_r11 | ||
| ; X64: movq %[[REG]], %r11 | ||
| ; X64: jmp __llvm_retpoline_r11 # TAILCALL | ||
| ; X64FAST-LABEL: nonlazybind_caller: | ||
| ; X64FAST: movq nonlazybind_callee@GOTPCREL(%rip), %r11 | ||
| ; X64FAST: callq __llvm_retpoline_r11 | ||
| ; X64FAST: movq nonlazybind_callee@GOTPCREL(%rip), %r11 | ||
| ; X64FAST: jmp __llvm_retpoline_r11 # TAILCALL | ||
| ; X86-LABEL: nonlazybind_caller: | ||
| ; X86: calll nonlazybind_callee@PLT | ||
| ; X86: jmp nonlazybind_callee@PLT # TAILCALL | ||
| ; X86FAST-LABEL: nonlazybind_caller: | ||
| ; X86FAST: calll nonlazybind_callee@PLT | ||
| ; X86FAST: jmp nonlazybind_callee@PLT # TAILCALL | ||
|
|
||
|
|
||
| @indirectbr_rewrite.targets = constant [10 x i8*] [i8* blockaddress(@indirectbr_rewrite, %bb0), | ||
| i8* blockaddress(@indirectbr_rewrite, %bb1), | ||
| i8* blockaddress(@indirectbr_rewrite, %bb2), | ||
| i8* blockaddress(@indirectbr_rewrite, %bb3), | ||
| i8* blockaddress(@indirectbr_rewrite, %bb4), | ||
| i8* blockaddress(@indirectbr_rewrite, %bb5), | ||
| i8* blockaddress(@indirectbr_rewrite, %bb6), | ||
| i8* blockaddress(@indirectbr_rewrite, %bb7), | ||
| i8* blockaddress(@indirectbr_rewrite, %bb8), | ||
| i8* blockaddress(@indirectbr_rewrite, %bb9)] | ||
|
|
||
| ; Check that when retpolines are enabled a function with indirectbr gets | ||
| ; rewritten to use switch, and that in turn doesn't get lowered as a jump | ||
| ; table. | ||
| define void @indirectbr_rewrite(i64* readonly %p, i64* %sink) #0 { | ||
| ; X64-LABEL: indirectbr_rewrite: | ||
| ; X64-NOT: jmpq | ||
| ; X86-LABEL: indirectbr_rewrite: | ||
| ; X86-NOT: jmpl | ||
| entry: | ||
| %i0 = load i64, i64* %p | ||
| %target.i0 = getelementptr [10 x i8*], [10 x i8*]* @indirectbr_rewrite.targets, i64 0, i64 %i0 | ||
| %target0 = load i8*, i8** %target.i0 | ||
| indirectbr i8* %target0, [label %bb1, label %bb3] | ||
|
|
||
| bb0: | ||
| store volatile i64 0, i64* %sink | ||
| br label %latch | ||
|
|
||
| bb1: | ||
| store volatile i64 1, i64* %sink | ||
| br label %latch | ||
|
|
||
| bb2: | ||
| store volatile i64 2, i64* %sink | ||
| br label %latch | ||
|
|
||
| bb3: | ||
| store volatile i64 3, i64* %sink | ||
| br label %latch | ||
|
|
||
| bb4: | ||
| store volatile i64 4, i64* %sink | ||
| br label %latch | ||
|
|
||
| bb5: | ||
| store volatile i64 5, i64* %sink | ||
| br label %latch | ||
|
|
||
| bb6: | ||
| store volatile i64 6, i64* %sink | ||
| br label %latch | ||
|
|
||
| bb7: | ||
| store volatile i64 7, i64* %sink | ||
| br label %latch | ||
|
|
||
| bb8: | ||
| store volatile i64 8, i64* %sink | ||
| br label %latch | ||
|
|
||
| bb9: | ||
| store volatile i64 9, i64* %sink | ||
| br label %latch | ||
|
|
||
| latch: | ||
| %i.next = load i64, i64* %p | ||
| %target.i.next = getelementptr [10 x i8*], [10 x i8*]* @indirectbr_rewrite.targets, i64 0, i64 %i.next | ||
| %target.next = load i8*, i8** %target.i.next | ||
| ; Potentially hit a full 10 successors here so that even if we rewrite as | ||
| ; a switch it will try to be lowered with a jump table. | ||
| indirectbr i8* %target.next, [label %bb0, | ||
| label %bb1, | ||
| label %bb2, | ||
| label %bb3, | ||
| label %bb4, | ||
| label %bb5, | ||
| label %bb6, | ||
| label %bb7, | ||
| label %bb8, | ||
| label %bb9] | ||
| } | ||
|
|
||
| ; Lastly check that the necessary thunks were emitted. | ||
| ; | ||
| ; X64-LABEL: .section .text.__llvm_retpoline_r11,{{.*}},__llvm_retpoline_r11,comdat | ||
| ; X64-NEXT: .hidden __llvm_retpoline_r11 | ||
| ; X64-NEXT: .weak __llvm_retpoline_r11 | ||
| ; X64: __llvm_retpoline_r11: | ||
| ; X64-NEXT: # {{.*}} # %entry | ||
| ; X64-NEXT: callq [[CALL_TARGET:.*]] | ||
| ; X64-NEXT: [[CAPTURE_SPEC:.*]]: # Block address taken | ||
| ; X64-NEXT: # %entry | ||
| ; X64-NEXT: # =>This Inner Loop Header: Depth=1 | ||
| ; X64-NEXT: pause | ||
| ; X64-NEXT: lfence | ||
| ; X64-NEXT: jmp [[CAPTURE_SPEC]] | ||
| ; X64-NEXT: .p2align 4, 0x90 | ||
| ; X64-NEXT: [[CALL_TARGET]]: # Block address taken | ||
| ; X64-NEXT: # %entry | ||
| ; X64-NEXT: movq %r11, (%rsp) | ||
| ; X64-NEXT: retq | ||
| ; | ||
| ; X86-LABEL: .section .text.__llvm_retpoline_eax,{{.*}},__llvm_retpoline_eax,comdat | ||
| ; X86-NEXT: .hidden __llvm_retpoline_eax | ||
| ; X86-NEXT: .weak __llvm_retpoline_eax | ||
| ; X86: __llvm_retpoline_eax: | ||
| ; X86-NEXT: # {{.*}} # %entry | ||
| ; X86-NEXT: calll [[CALL_TARGET:.*]] | ||
| ; X86-NEXT: [[CAPTURE_SPEC:.*]]: # Block address taken | ||
| ; X86-NEXT: # %entry | ||
| ; X86-NEXT: # =>This Inner Loop Header: Depth=1 | ||
| ; X86-NEXT: pause | ||
| ; X86-NEXT: lfence | ||
| ; X86-NEXT: jmp [[CAPTURE_SPEC]] | ||
| ; X86-NEXT: .p2align 4, 0x90 | ||
| ; X86-NEXT: [[CALL_TARGET]]: # Block address taken | ||
| ; X86-NEXT: # %entry | ||
| ; X86-NEXT: movl %eax, (%esp) | ||
| ; X86-NEXT: retl | ||
| ; | ||
| ; X86-LABEL: .section .text.__llvm_retpoline_ecx,{{.*}},__llvm_retpoline_ecx,comdat | ||
| ; X86-NEXT: .hidden __llvm_retpoline_ecx | ||
| ; X86-NEXT: .weak __llvm_retpoline_ecx | ||
| ; X86: __llvm_retpoline_ecx: | ||
| ; X86-NEXT: # {{.*}} # %entry | ||
| ; X86-NEXT: calll [[CALL_TARGET:.*]] | ||
| ; X86-NEXT: [[CAPTURE_SPEC:.*]]: # Block address taken | ||
| ; X86-NEXT: # %entry | ||
| ; X86-NEXT: # =>This Inner Loop Header: Depth=1 | ||
| ; X86-NEXT: pause | ||
| ; X86-NEXT: lfence | ||
| ; X86-NEXT: jmp [[CAPTURE_SPEC]] | ||
| ; X86-NEXT: .p2align 4, 0x90 | ||
| ; X86-NEXT: [[CALL_TARGET]]: # Block address taken | ||
| ; X86-NEXT: # %entry | ||
| ; X86-NEXT: movl %ecx, (%esp) | ||
| ; X86-NEXT: retl | ||
| ; | ||
| ; X86-LABEL: .section .text.__llvm_retpoline_edx,{{.*}},__llvm_retpoline_edx,comdat | ||
| ; X86-NEXT: .hidden __llvm_retpoline_edx | ||
| ; X86-NEXT: .weak __llvm_retpoline_edx | ||
| ; X86: __llvm_retpoline_edx: | ||
| ; X86-NEXT: # {{.*}} # %entry | ||
| ; X86-NEXT: calll [[CALL_TARGET:.*]] | ||
| ; X86-NEXT: [[CAPTURE_SPEC:.*]]: # Block address taken | ||
| ; X86-NEXT: # %entry | ||
| ; X86-NEXT: # =>This Inner Loop Header: Depth=1 | ||
| ; X86-NEXT: pause | ||
| ; X86-NEXT: lfence | ||
| ; X86-NEXT: jmp [[CAPTURE_SPEC]] | ||
| ; X86-NEXT: .p2align 4, 0x90 | ||
| ; X86-NEXT: [[CALL_TARGET]]: # Block address taken | ||
| ; X86-NEXT: # %entry | ||
| ; X86-NEXT: movl %edx, (%esp) | ||
| ; X86-NEXT: retl | ||
| ; | ||
| ; X86-LABEL: .section .text.__llvm_retpoline_push,{{.*}},__llvm_retpoline_push,comdat | ||
| ; X86-NEXT: .hidden __llvm_retpoline_push | ||
| ; X86-NEXT: .weak __llvm_retpoline_push | ||
| ; X86: __llvm_retpoline_push: | ||
| ; X86-NEXT: # {{.*}} # %entry | ||
| ; X86-NEXT: calll [[CALL_TARGET:.*]] | ||
| ; X86-NEXT: [[CAPTURE_SPEC:.*]]: # Block address taken | ||
| ; X86-NEXT: # %entry | ||
| ; X86-NEXT: # =>This Inner Loop Header: Depth=1 | ||
| ; X86-NEXT: pause | ||
| ; X86-NEXT: lfence | ||
| ; X86-NEXT: jmp [[CAPTURE_SPEC]] | ||
| ; X86-NEXT: .p2align 4, 0x90 | ||
| ; X86-NEXT: [[CALL_TARGET]]: # Block address taken | ||
| ; X86-NEXT: # %entry | ||
| ; X86-NEXT: addl $4, %esp | ||
| ; X86-NEXT: pushl 4(%esp) | ||
| ; X86-NEXT: pushl 4(%esp) | ||
| ; X86-NEXT: popl 8(%esp) | ||
| ; X86-NEXT: popl (%esp) | ||
| ; X86-NEXT: retl | ||
|
|
||
|
|
||
| attributes #0 = { "target-features"="+retpoline" } | ||
| attributes #1 = { nonlazybind } |
| @@ -0,0 +1,63 @@ | ||
| ; RUN: opt < %s -indirectbr-expand -S | FileCheck %s | ||
| ; | ||
| ; REQUIRES: x86-registered-target | ||
|
|
||
| target triple = "x86_64-unknown-linux-gnu" | ||
|
|
||
| @test1.targets = constant [4 x i8*] [i8* blockaddress(@test1, %bb0), | ||
| i8* blockaddress(@test1, %bb1), | ||
| i8* blockaddress(@test1, %bb2), | ||
| i8* blockaddress(@test1, %bb3)] | ||
| ; CHECK-LABEL: @test1.targets = constant [4 x i8*] | ||
| ; CHECK: [i8* inttoptr (i64 1 to i8*), | ||
| ; CHECK: i8* inttoptr (i64 2 to i8*), | ||
| ; CHECK: i8* inttoptr (i64 3 to i8*), | ||
| ; CHECK: i8* blockaddress(@test1, %bb3)] | ||
|
|
||
| define void @test1(i64* readonly %p, i64* %sink) #0 { | ||
| ; CHECK-LABEL: define void @test1( | ||
| entry: | ||
| %i0 = load i64, i64* %p | ||
| %target.i0 = getelementptr [4 x i8*], [4 x i8*]* @test1.targets, i64 0, i64 %i0 | ||
| %target0 = load i8*, i8** %target.i0 | ||
| ; Only a subset of blocks are viable successors here. | ||
| indirectbr i8* %target0, [label %bb0, label %bb1] | ||
| ; CHECK-NOT: indirectbr | ||
| ; CHECK: %[[ENTRY_V:.*]] = ptrtoint i8* %{{.*}} to i64 | ||
| ; CHECK-NEXT: br label %[[SWITCH_BB:.*]] | ||
|
|
||
| bb0: | ||
| store volatile i64 0, i64* %sink | ||
| br label %latch | ||
|
|
||
| bb1: | ||
| store volatile i64 1, i64* %sink | ||
| br label %latch | ||
|
|
||
| bb2: | ||
| store volatile i64 2, i64* %sink | ||
| br label %latch | ||
|
|
||
| bb3: | ||
| store volatile i64 3, i64* %sink | ||
| br label %latch | ||
|
|
||
| latch: | ||
| %i.next = load i64, i64* %p | ||
| %target.i.next = getelementptr [4 x i8*], [4 x i8*]* @test1.targets, i64 0, i64 %i.next | ||
| %target.next = load i8*, i8** %target.i.next | ||
| ; A different subset of blocks are viable successors here. | ||
| indirectbr i8* %target.next, [label %bb1, label %bb2] | ||
| ; CHECK-NOT: indirectbr | ||
| ; CHECK: %[[LATCH_V:.*]] = ptrtoint i8* %{{.*}} to i64 | ||
| ; CHECK-NEXT: br label %[[SWITCH_BB]] | ||
| ; | ||
| ; CHECK: [[SWITCH_BB]]: | ||
| ; CHECK-NEXT: %[[V:.*]] = phi i64 [ %[[ENTRY_V]], %entry ], [ %[[LATCH_V]], %latch ] | ||
| ; CHECK-NEXT: switch i64 %[[V]], label %bb0 [ | ||
| ; CHECK-NEXT: i64 2, label %bb1 | ||
| ; CHECK-NEXT: i64 3, label %bb2 | ||
| ; CHECK-NEXT: ] | ||
| } | ||
|
|
||
| attributes #0 = { "target-features"="+retpoline" } |