|
; RUN: opt -passes=inline -inline-threshold=0 -inline-all-viable-calls -S < %s | FileCheck %s

; Check that viable calls that are beyond the cost threshold are still inlined.
; A callee with enough instructions to exceed -inline-threshold=0, so the cost
; model alone would reject it; it must still be inlined because
; -inline-all-viable-calls is set (see the CHECK block in @caller).
define i32 @callee_simple(i32 %x) {
  %1 = add i32 %x, 1
  %2 = mul i32 %1, 2
  %3 = sub i32 %2, 1
  %4 = add i32 %3, 3
  %5 = mul i32 %4, 2
  %6 = sub i32 %5, 2
  %7 = add i32 %6, 1
  ret i32 %7
}
| 14 | + |
; Check that user decisions are respected.
; alwaysinline: must be inlined regardless of cost (verified in @caller's CHECKs).
define i32 @callee_alwaysinline(i32 %x) alwaysinline {
  %sub = sub i32 %x, 3
  ret i32 %sub
}
| 20 | + |
; noinline: must NOT be inlined even with -inline-all-viable-calls — the call
; survives in @caller's expected output.
define i32 @callee_noinline(i32 %x) noinline {
  %div = sdiv i32 %x, 2
  ret i32 %div
}
| 25 | + |
; optnone (which requires noinline): also not a viable inlining candidate — the
; call survives in @caller's expected output.
define i32 @callee_optnone(i32 %x) optnone noinline {
  %rem = srem i32 %x, 2
  ret i32 %rem
}
| 30 | + |
; Expected result: @callee_simple and @callee_alwaysinline are fully inlined
; (their arithmetic appears directly in the body), while the calls to
; @callee_noinline and @callee_optnone remain.
; NOTE: the CHECK block below is autogenerated (update_test_checks.py style);
; do not hand-edit it — regenerate instead.
define i32 @caller(i32 %a) {
; CHECK-LABEL: define i32 @caller(
; CHECK-SAME: i32 [[A:%.*]]) {
; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[A]], 1
; CHECK-NEXT: [[TMP8:%.*]] = mul i32 [[TMP7]], 2
; CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[TMP8]], 1
; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], 3
; CHECK-NEXT: [[TMP5:%.*]] = mul i32 [[TMP4]], 2
; CHECK-NEXT: [[TMP6:%.*]] = sub i32 [[TMP5]], 2
; CHECK-NEXT: [[ADD_I:%.*]] = add i32 [[TMP6]], 1
; CHECK-NEXT: [[SUB_I:%.*]] = sub i32 [[ADD_I]], 3
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @callee_noinline(i32 [[SUB_I]])
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @callee_optnone(i32 [[TMP1]])
; CHECK-NEXT: [[SUM:%.*]] = add i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret i32 [[SUM]]
;
  %1 = call i32 @callee_simple(i32 %a)
  %2 = call i32 @callee_alwaysinline(i32 %1)
  %3 = call i32 @callee_noinline(i32 %2)
  %4 = call i32 @callee_optnone(i32 %3)
  %sum = add i32 %4, %3
  ret i32 %sum
}
| 54 | + |
; Check that non-viable calls are not inlined

; Test recursive function is not inlined
; (self-recursion makes the call site non-viable; see @call_recursive's CHECKs).
define i32 @recursive(i32 %n) {
entry:
  %cmp = icmp eq i32 %n, 0
  br i1 %cmp, label %base, label %recurse

base:
  ret i32 0

recurse:
  ; Self-call: counts down %n until the base case.
  %dec = sub i32 %n, 1
  %rec = call i32 @recursive(i32 %dec)
  %add = add i32 %rec, 1
  ret i32 %add
}
| 72 | + |
; Expected result: the call to the recursive function is left in place even
; though -inline-all-viable-calls is set.
define i32 @call_recursive(i32 %x) {
; CHECK-LABEL: define i32 @call_recursive(
; CHECK-SAME: i32 [[X:%.*]]) {
; CHECK-NEXT: [[R:%.*]] = call i32 @recursive(i32 [[X]])
; CHECK-NEXT: ret i32 [[R]]
;
  %r = call i32 @recursive(i32 %x)
  ret i32 %r
}
| 82 | + |
; Test indirectbr prevents inlining
; (a callee containing indirectbr is not a viable inlining candidate).
define void @has_indirectbr(ptr %ptr, i32 %cond) {
entry:
  switch i32 %cond, label %default [
  i32 0, label %target0
  i32 1, label %target1
  ]

target0:
  br label %end

target1:
  br label %end

default:
  br label %end

end:
  indirectbr ptr %ptr, [label %target0, label %target1]
  ; indirectbr is a terminator, so the ret below begins a new, unnamed
  ; (and unreachable) basic block; it exists only so the function is well-formed.
  ret void
}
| 104 | + |
; Expected result: the call to the indirectbr-containing callee is not inlined.
define void @call_indirectbr(ptr %p, i32 %c) {
; CHECK-LABEL: define void @call_indirectbr(
; CHECK-SAME: ptr [[P:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: call void @has_indirectbr(ptr [[P]], i32 [[C]])
; CHECK-NEXT: ret void
;
  call void @has_indirectbr(ptr %p, i32 %c)
  ret void
}
| 114 | + |