311 changes: 311 additions & 0 deletions llvm/test/CodeGen/RISCV/frame-info.ll

Large diffs are not rendered by default.

10 changes: 10 additions & 0 deletions llvm/test/CodeGen/RISCV/half-convert-strict.ll
@@ -524,7 +524,9 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) strictfp {
; CHECK32-D-NEXT: seqz a1, a0
; CHECK32-D-NEXT: add a0, a0, a1
; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-D-NEXT: .cfi_restore ra
; CHECK32-D-NEXT: addi sp, sp, 16
; CHECK32-D-NEXT: .cfi_def_cfa_offset 0
; CHECK32-D-NEXT: ret
%a = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %x, metadata !"fpexcept.strict")
%b = icmp eq i32 %a, 0
@@ -2359,7 +2361,11 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) strictfp {
; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-D-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; CHECK32-D-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; CHECK32-D-NEXT: .cfi_restore ra
; CHECK32-D-NEXT: .cfi_restore s0
; CHECK32-D-NEXT: .cfi_restore s1
; CHECK32-D-NEXT: addi sp, sp, 16
; CHECK32-D-NEXT: .cfi_def_cfa_offset 0
; CHECK32-D-NEXT: ret
%3 = add i32 %0, 1
%4 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
@@ -2493,7 +2499,11 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) strictfp {
; CHECK32-D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-D-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; CHECK32-D-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; CHECK32-D-NEXT: .cfi_restore ra
; CHECK32-D-NEXT: .cfi_restore s0
; CHECK32-D-NEXT: .cfi_restore s1
; CHECK32-D-NEXT: addi sp, sp, 16
; CHECK32-D-NEXT: .cfi_def_cfa_offset 0
; CHECK32-D-NEXT: ret
%3 = add i32 %0, 1
%4 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
20 changes: 20 additions & 0 deletions llvm/test/CodeGen/RISCV/half-intrinsics.ll
@@ -3020,7 +3020,12 @@ define half @maximumnum_half(half %x, half %y) {
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: .cfi_restore s0
; RV32I-NEXT: .cfi_restore s1
; RV32I-NEXT: .cfi_restore s2
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: maximumnum_half:
@@ -3051,7 +3056,12 @@ define half @maximumnum_half(half %x, half %y) {
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: .cfi_restore s0
; RV64I-NEXT: .cfi_restore s1
; RV64I-NEXT: .cfi_restore s2
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
;
; CHECKIZFHMIN-LABEL: maximumnum_half:
@@ -3114,7 +3124,12 @@ define half @minimumnum_half(half %x, half %y) {
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: .cfi_restore s0
; RV32I-NEXT: .cfi_restore s1
; RV32I-NEXT: .cfi_restore s2
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: minimumnum_half:
@@ -3145,7 +3160,12 @@ define half @minimumnum_half(half %x, half %y) {
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: .cfi_restore s0
; RV64I-NEXT: .cfi_restore s1
; RV64I-NEXT: .cfi_restore s2
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
;
; CHECKIZFHMIN-LABEL: minimumnum_half:
80 changes: 80 additions & 0 deletions llvm/test/CodeGen/RISCV/half-round-conv.ll
@@ -325,7 +325,9 @@ define i64 @test_floor_si64(half %x) {
; RV32IZFH-NEXT: .cfi_offset ra, -4
; RV32IZFH-NEXT: call __fixhfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: .cfi_restore ra
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: .cfi_def_cfa_offset 0
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_floor_si64:
@@ -351,7 +353,9 @@ define i64 @test_floor_si64(half %x) {
; RV32IZHINX-NEXT: .cfi_offset ra, -4
; RV32IZHINX-NEXT: call __fixhfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: .cfi_restore ra
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_floor_si64:
@@ -389,7 +393,9 @@ define i64 @test_floor_si64(half %x) {
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
; RV32IZFHMIN-NEXT: call __fixhfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: .cfi_restore ra
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_floor_si64:
@@ -429,7 +435,9 @@ define i64 @test_floor_si64(half %x) {
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: call __fixhfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: .cfi_restore ra
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_floor_si64:
@@ -762,7 +770,9 @@ define i64 @test_floor_ui64(half %x) {
; RV32IZFH-NEXT: .cfi_offset ra, -4
; RV32IZFH-NEXT: call __fixunshfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: .cfi_restore ra
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: .cfi_def_cfa_offset 0
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_floor_ui64:
@@ -788,7 +798,9 @@ define i64 @test_floor_ui64(half %x) {
; RV32IZHINX-NEXT: .cfi_offset ra, -4
; RV32IZHINX-NEXT: call __fixunshfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: .cfi_restore ra
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_floor_ui64:
@@ -826,7 +838,9 @@ define i64 @test_floor_ui64(half %x) {
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
; RV32IZFHMIN-NEXT: call __fixunshfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: .cfi_restore ra
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_floor_ui64:
@@ -866,7 +880,9 @@ define i64 @test_floor_ui64(half %x) {
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: call __fixunshfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: .cfi_restore ra
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_floor_ui64:
@@ -1199,7 +1215,9 @@ define i64 @test_ceil_si64(half %x) {
; RV32IZFH-NEXT: .cfi_offset ra, -4
; RV32IZFH-NEXT: call __fixhfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: .cfi_restore ra
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: .cfi_def_cfa_offset 0
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_ceil_si64:
@@ -1225,7 +1243,9 @@ define i64 @test_ceil_si64(half %x) {
; RV32IZHINX-NEXT: .cfi_offset ra, -4
; RV32IZHINX-NEXT: call __fixhfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: .cfi_restore ra
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_ceil_si64:
@@ -1263,7 +1283,9 @@ define i64 @test_ceil_si64(half %x) {
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
; RV32IZFHMIN-NEXT: call __fixhfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: .cfi_restore ra
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_ceil_si64:
@@ -1303,7 +1325,9 @@ define i64 @test_ceil_si64(half %x) {
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: call __fixhfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: .cfi_restore ra
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_ceil_si64:
@@ -1636,7 +1660,9 @@ define i64 @test_ceil_ui64(half %x) {
; RV32IZFH-NEXT: .cfi_offset ra, -4
; RV32IZFH-NEXT: call __fixunshfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: .cfi_restore ra
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: .cfi_def_cfa_offset 0
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_ceil_ui64:
@@ -1662,7 +1688,9 @@ define i64 @test_ceil_ui64(half %x) {
; RV32IZHINX-NEXT: .cfi_offset ra, -4
; RV32IZHINX-NEXT: call __fixunshfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: .cfi_restore ra
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_ceil_ui64:
@@ -1700,7 +1728,9 @@ define i64 @test_ceil_ui64(half %x) {
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
; RV32IZFHMIN-NEXT: call __fixunshfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: .cfi_restore ra
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_ceil_ui64:
@@ -1740,7 +1770,9 @@ define i64 @test_ceil_ui64(half %x) {
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: call __fixunshfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: .cfi_restore ra
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_ceil_ui64:
@@ -2073,7 +2105,9 @@ define i64 @test_trunc_si64(half %x) {
; RV32IZFH-NEXT: .cfi_offset ra, -4
; RV32IZFH-NEXT: call __fixhfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: .cfi_restore ra
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: .cfi_def_cfa_offset 0
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_trunc_si64:
@@ -2099,7 +2133,9 @@ define i64 @test_trunc_si64(half %x) {
; RV32IZHINX-NEXT: .cfi_offset ra, -4
; RV32IZHINX-NEXT: call __fixhfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: .cfi_restore ra
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_trunc_si64:
@@ -2137,7 +2173,9 @@ define i64 @test_trunc_si64(half %x) {
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
; RV32IZFHMIN-NEXT: call __fixhfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: .cfi_restore ra
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_trunc_si64:
@@ -2177,7 +2215,9 @@ define i64 @test_trunc_si64(half %x) {
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: call __fixhfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: .cfi_restore ra
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_trunc_si64:
@@ -2510,7 +2550,9 @@ define i64 @test_trunc_ui64(half %x) {
; RV32IZFH-NEXT: .cfi_offset ra, -4
; RV32IZFH-NEXT: call __fixunshfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: .cfi_restore ra
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: .cfi_def_cfa_offset 0
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_trunc_ui64:
@@ -2536,7 +2578,9 @@ define i64 @test_trunc_ui64(half %x) {
; RV32IZHINX-NEXT: .cfi_offset ra, -4
; RV32IZHINX-NEXT: call __fixunshfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: .cfi_restore ra
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_trunc_ui64:
@@ -2574,7 +2618,9 @@ define i64 @test_trunc_ui64(half %x) {
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
; RV32IZFHMIN-NEXT: call __fixunshfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: .cfi_restore ra
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_trunc_ui64:
@@ -2614,7 +2660,9 @@ define i64 @test_trunc_ui64(half %x) {
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: call __fixunshfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: .cfi_restore ra
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_trunc_ui64:
@@ -2947,7 +2995,9 @@ define i64 @test_round_si64(half %x) {
; RV32IZFH-NEXT: .cfi_offset ra, -4
; RV32IZFH-NEXT: call __fixhfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: .cfi_restore ra
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: .cfi_def_cfa_offset 0
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_round_si64:
@@ -2973,7 +3023,9 @@ define i64 @test_round_si64(half %x) {
; RV32IZHINX-NEXT: .cfi_offset ra, -4
; RV32IZHINX-NEXT: call __fixhfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: .cfi_restore ra
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_round_si64:
@@ -3011,7 +3063,9 @@ define i64 @test_round_si64(half %x) {
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
; RV32IZFHMIN-NEXT: call __fixhfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: .cfi_restore ra
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_round_si64:
@@ -3051,7 +3105,9 @@ define i64 @test_round_si64(half %x) {
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: call __fixhfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: .cfi_restore ra
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_round_si64:
@@ -3384,7 +3440,9 @@ define i64 @test_round_ui64(half %x) {
; RV32IZFH-NEXT: .cfi_offset ra, -4
; RV32IZFH-NEXT: call __fixunshfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: .cfi_restore ra
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: .cfi_def_cfa_offset 0
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_round_ui64:
@@ -3410,7 +3468,9 @@ define i64 @test_round_ui64(half %x) {
; RV32IZHINX-NEXT: .cfi_offset ra, -4
; RV32IZHINX-NEXT: call __fixunshfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: .cfi_restore ra
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_round_ui64:
@@ -3448,7 +3508,9 @@ define i64 @test_round_ui64(half %x) {
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
; RV32IZFHMIN-NEXT: call __fixunshfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: .cfi_restore ra
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_round_ui64:
@@ -3488,7 +3550,9 @@ define i64 @test_round_ui64(half %x) {
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: call __fixunshfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: .cfi_restore ra
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_round_ui64:
@@ -3821,7 +3885,9 @@ define i64 @test_roundeven_si64(half %x) {
; RV32IZFH-NEXT: .cfi_offset ra, -4
; RV32IZFH-NEXT: call __fixhfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: .cfi_restore ra
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: .cfi_def_cfa_offset 0
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_roundeven_si64:
@@ -3847,7 +3913,9 @@ define i64 @test_roundeven_si64(half %x) {
; RV32IZHINX-NEXT: .cfi_offset ra, -4
; RV32IZHINX-NEXT: call __fixhfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: .cfi_restore ra
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_roundeven_si64:
@@ -3885,7 +3953,9 @@ define i64 @test_roundeven_si64(half %x) {
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
; RV32IZFHMIN-NEXT: call __fixhfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: .cfi_restore ra
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_roundeven_si64:
@@ -3925,7 +3995,9 @@ define i64 @test_roundeven_si64(half %x) {
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: call __fixhfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: .cfi_restore ra
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_roundeven_si64:
@@ -4258,7 +4330,9 @@ define i64 @test_roundeven_ui64(half %x) {
; RV32IZFH-NEXT: .cfi_offset ra, -4
; RV32IZFH-NEXT: call __fixunshfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: .cfi_restore ra
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: .cfi_def_cfa_offset 0
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_roundeven_ui64:
@@ -4284,7 +4358,9 @@ define i64 @test_roundeven_ui64(half %x) {
; RV32IZHINX-NEXT: .cfi_offset ra, -4
; RV32IZHINX-NEXT: call __fixunshfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: .cfi_restore ra
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_roundeven_ui64:
@@ -4322,7 +4398,9 @@ define i64 @test_roundeven_ui64(half %x) {
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
; RV32IZFHMIN-NEXT: call __fixunshfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: .cfi_restore ra
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_roundeven_ui64:
@@ -4362,7 +4440,9 @@ define i64 @test_roundeven_ui64(half %x) {
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: call __fixunshfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: .cfi_restore ra
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 0
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_roundeven_ui64:
4 changes: 4 additions & 0 deletions llvm/test/CodeGen/RISCV/hwasan-check-memaccess.ll
@@ -14,7 +14,9 @@ define ptr @f2(ptr %x0, ptr %x1) {
; CHECK-NEXT: mv t0, a1
; CHECK-NEXT: call __hwasan_check_x10_2_short
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: .cfi_restore ra
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; COMPRESS-LABEL: f2:
@@ -26,7 +28,9 @@ define ptr @f2(ptr %x0, ptr %x1) {
; COMPRESS-NEXT: c.mv t0, a1
; COMPRESS-NEXT: call __hwasan_check_x10_2_short
; COMPRESS-NEXT: c.ldsp ra, 8(sp) # 8-byte Folded Reload
; COMPRESS-NEXT: .cfi_restore ra
; COMPRESS-NEXT: c.addi sp, 16
; COMPRESS-NEXT: .cfi_def_cfa_offset 0
; COMPRESS-NEXT: c.jr ra
call void @llvm.hwasan.check.memaccess.shortgranules(ptr %x1, ptr %x0, i32 2)
ret ptr %x0
2 changes: 0 additions & 2 deletions llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
@@ -108,15 +108,13 @@ define double @constraint_f_double_abi_name(double %a) nounwind {
define double @constraint_gpr(double %x) {
; RV32F-LABEL: constraint_gpr:
; RV32F: # %bb.0:
; RV32F-NEXT: .cfi_def_cfa_offset 0
; RV32F-NEXT: #APP
; RV32F-NEXT: mv a0, a0
; RV32F-NEXT: #NO_APP
; RV32F-NEXT: ret
;
; RV64F-LABEL: constraint_gpr:
; RV64F: # %bb.0:
; RV64F-NEXT: .cfi_def_cfa_offset 0
; RV64F-NEXT: #APP
; RV64F-NEXT: mv a0, a0
; RV64F-NEXT: #NO_APP
2 changes: 0 additions & 2 deletions llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll
@@ -94,15 +94,13 @@ define float @constraint_f_float_abi_name(float %a) nounwind {
define float @constraint_gpr(float %x) {
; RV32F-LABEL: constraint_gpr:
; RV32F: # %bb.0:
; RV32F-NEXT: .cfi_def_cfa_offset 0
; RV32F-NEXT: #APP
; RV32F-NEXT: mv a0, a0
; RV32F-NEXT: #NO_APP
; RV32F-NEXT: ret
;
; RV64F-LABEL: constraint_gpr:
; RV64F: # %bb.0:
; RV64F-NEXT: .cfi_def_cfa_offset 0
; RV64F-NEXT: #APP
; RV64F-NEXT: mv a0, a0
; RV64F-NEXT: #NO_APP
5 changes: 0 additions & 5 deletions llvm/test/CodeGen/RISCV/inline-asm-mem-constraint.ll
@@ -3365,7 +3365,6 @@ label:
define void @should_not_fold() {
; RV32I-LABEL: should_not_fold:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: lui a0, %hi(_ZN5repro9MY_BUFFER17hb0f674501d5980a6E)
; RV32I-NEXT: addi a0, a0, %lo(_ZN5repro9MY_BUFFER17hb0f674501d5980a6E)
; RV32I-NEXT: #APP
@@ -3375,7 +3374,6 @@ define void @should_not_fold() {
;
; RV64I-LABEL: should_not_fold:
; RV64I: # %bb.0: # %start
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: lui a0, %hi(_ZN5repro9MY_BUFFER17hb0f674501d5980a6E)
; RV64I-NEXT: addi a0, a0, %lo(_ZN5repro9MY_BUFFER17hb0f674501d5980a6E)
; RV64I-NEXT: #APP
@@ -3385,7 +3383,6 @@ define void @should_not_fold() {
;
; RV32I-MEDIUM-LABEL: should_not_fold:
; RV32I-MEDIUM: # %bb.0: # %start
; RV32I-MEDIUM-NEXT: .cfi_def_cfa_offset 0
; RV32I-MEDIUM-NEXT: .Lpcrel_hi37:
; RV32I-MEDIUM-NEXT: auipc a0, %pcrel_hi(_ZN5repro9MY_BUFFER17hb0f674501d5980a6E)
; RV32I-MEDIUM-NEXT: addi a0, a0, %pcrel_lo(.Lpcrel_hi37)
@@ -3396,7 +3393,6 @@ define void @should_not_fold() {
;
; RV64I-MEDIUM-LABEL: should_not_fold:
; RV64I-MEDIUM: # %bb.0: # %start
; RV64I-MEDIUM-NEXT: .cfi_def_cfa_offset 0
; RV64I-MEDIUM-NEXT: .Lpcrel_hi37:
; RV64I-MEDIUM-NEXT: auipc a0, %pcrel_hi(_ZN5repro9MY_BUFFER17hb0f674501d5980a6E)
; RV64I-MEDIUM-NEXT: addi a0, a0, %pcrel_lo(.Lpcrel_hi37)
@@ -3407,7 +3403,6 @@ define void @should_not_fold() {
;
; RV64I-LARGE-LABEL: should_not_fold:
; RV64I-LARGE: # %bb.0: # %start
; RV64I-LARGE-NEXT: .cfi_def_cfa_offset 0
; RV64I-LARGE-NEXT: .Lpcrel_hi37:
; RV64I-LARGE-NEXT: auipc a0, %pcrel_hi(.LCPI45_0)
; RV64I-LARGE-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi37)(a0)
4 changes: 0 additions & 4 deletions llvm/test/CodeGen/RISCV/inline-asm-zfh-constraint-f.ll
@@ -156,7 +156,6 @@ define half @constraint_f_half_abi_name(half %a) nounwind {
define half @constraint_gpr(half %x) {
; RV32ZFH-LABEL: constraint_gpr:
; RV32ZFH: # %bb.0:
; RV32ZFH-NEXT: .cfi_def_cfa_offset 0
; RV32ZFH-NEXT: fmv.x.h a0, fa0
; RV32ZFH-NEXT: #APP
; RV32ZFH-NEXT: mv a0, a0
@@ -166,7 +165,6 @@ define half @constraint_gpr(half %x) {
;
; RV64ZFH-LABEL: constraint_gpr:
; RV64ZFH: # %bb.0:
; RV64ZFH-NEXT: .cfi_def_cfa_offset 0
; RV64ZFH-NEXT: fmv.x.h a0, fa0
; RV64ZFH-NEXT: #APP
; RV64ZFH-NEXT: mv a0, a0
@@ -176,7 +174,6 @@ define half @constraint_gpr(half %x) {
;
; RV32DZFH-LABEL: constraint_gpr:
; RV32DZFH: # %bb.0:
; RV32DZFH-NEXT: .cfi_def_cfa_offset 0
; RV32DZFH-NEXT: fmv.x.h a0, fa0
; RV32DZFH-NEXT: #APP
; RV32DZFH-NEXT: mv a0, a0
@@ -186,7 +183,6 @@ define half @constraint_gpr(half %x) {
;
; RV64DZFH-LABEL: constraint_gpr:
; RV64DZFH: # %bb.0:
; RV64DZFH-NEXT: .cfi_def_cfa_offset 0
; RV64DZFH-NEXT: fmv.x.h a0, fa0
; RV64DZFH-NEXT: #APP
; RV64DZFH-NEXT: mv a0, a0
3 changes: 3 additions & 0 deletions llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
@@ -100,8 +100,11 @@ define i64 @ctz_nxv8i1_no_range(<vscale x 8 x i16> %a) {
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: slli a2, a2, 1
; RV32-NEXT: add sp, sp, a2
; RV32-NEXT: .cfi_def_cfa sp, 48
; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: ctz_nxv8i1_no_range:
2 changes: 2 additions & 0 deletions llvm/test/CodeGen/RISCV/kcfi-mir.ll
@@ -15,7 +15,9 @@ define void @f1(ptr noundef %x) !kcfi_type !1 {
; CHECK-NEXT: PseudoCALLIndirect killed $x10, csr_ilp32_lp64, implicit-def dead $x1, implicit-def $x2
; CHECK-NEXT: }
; CHECK-NEXT: $x1 = LD $x2, 8 :: (load (s64) from %stack.0)
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 16
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
; CHECK-NEXT: PseudoRET
call void %x() [ "kcfi"(i32 12345678) ]
ret void
15 changes: 15 additions & 0 deletions llvm/test/CodeGen/RISCV/large-stack.ll
@@ -16,6 +16,7 @@ define void @test() {
; RV32I-FPELIM-NEXT: lui a0, 74565
; RV32I-FPELIM-NEXT: addi a0, a0, 1664
; RV32I-FPELIM-NEXT: add sp, sp, a0
; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset 0
; RV32I-FPELIM-NEXT: ret
;
; RV32I-WITHFP-LABEL: test:
@@ -34,9 +35,13 @@ define void @test() {
; RV32I-WITHFP-NEXT: lui a0, 74565
; RV32I-WITHFP-NEXT: addi a0, a0, -352
; RV32I-WITHFP-NEXT: add sp, sp, a0
; RV32I-WITHFP-NEXT: .cfi_def_cfa sp, 2032
; RV32I-WITHFP-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: .cfi_restore ra
; RV32I-WITHFP-NEXT: .cfi_restore s0
; RV32I-WITHFP-NEXT: addi sp, sp, 2032
; RV32I-WITHFP-NEXT: .cfi_def_cfa_offset 0
; RV32I-WITHFP-NEXT: ret
%tmp = alloca [ 305419896 x i8 ] , align 4
ret void
@@ -71,9 +76,13 @@ define void @test_emergency_spill_slot(i32 %a) {
; RV32I-FPELIM-NEXT: lui a0, 97
; RV32I-FPELIM-NEXT: addi a0, a0, 672
; RV32I-FPELIM-NEXT: add sp, sp, a0
; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset 2032
; RV32I-FPELIM-NEXT: lw s0, 2028(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: lw s1, 2024(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: .cfi_restore s0
; RV32I-FPELIM-NEXT: .cfi_restore s1
; RV32I-FPELIM-NEXT: addi sp, sp, 2032
; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset 0
; RV32I-FPELIM-NEXT: ret
;
; RV32I-WITHFP-LABEL: test_emergency_spill_slot:
@@ -108,11 +117,17 @@ define void @test_emergency_spill_slot(i32 %a) {
; RV32I-WITHFP-NEXT: lui a0, 97
; RV32I-WITHFP-NEXT: addi a0, a0, 688
; RV32I-WITHFP-NEXT: add sp, sp, a0
; RV32I-WITHFP-NEXT: .cfi_def_cfa sp, 2032
; RV32I-WITHFP-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s1, 2020(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s2, 2016(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: .cfi_restore ra
; RV32I-WITHFP-NEXT: .cfi_restore s0
; RV32I-WITHFP-NEXT: .cfi_restore s1
; RV32I-WITHFP-NEXT: .cfi_restore s2
; RV32I-WITHFP-NEXT: addi sp, sp, 2032
; RV32I-WITHFP-NEXT: .cfi_def_cfa_offset 0
; RV32I-WITHFP-NEXT: ret
%data = alloca [ 100000 x i32 ] , align 4
%ptr = getelementptr inbounds [100000 x i32], ptr %data, i32 0, i32 80000
2 changes: 2 additions & 0 deletions llvm/test/CodeGen/RISCV/live-sp.mir
@@ -81,7 +81,9 @@ body: |
; CHECK-NEXT: $x10 = COPY $x0
; CHECK-NEXT: PseudoCALL target-flags(riscv-call) @vararg, csr_ilp32_lp64, implicit-def dead $x1, implicit killed $x10, implicit $x11, implicit-def $x2
; CHECK-NEXT: $x1 = LD $x2, 8 :: (load (s64) from %stack.1)
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 16
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
; CHECK-NEXT: PseudoRET
SW renamable $x1, %stack.0.a, 0 :: (store (s32) into %ir.a)
renamable $x11 = ADDIW killed renamable $x1, 0
120 changes: 120 additions & 0 deletions llvm/test/CodeGen/RISCV/llvm.exp10.ll
@@ -37,7 +37,9 @@ define half @exp10_f16(half %x) {
; RV32IFD-NEXT: or a0, a0, a1
; RV32IFD-NEXT: fmv.w.x fa0, a0
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: .cfi_def_cfa_offset 0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: exp10_f16:
@@ -54,7 +56,9 @@ define half @exp10_f16(half %x) {
; RV64IFD-NEXT: or a0, a0, a1
; RV64IFD-NEXT: fmv.w.x fa0, a0
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: .cfi_restore ra
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: .cfi_def_cfa_offset 0
; RV64IFD-NEXT: ret
%r = call half @llvm.exp10.f16(half %x)
ret half %r
Expand All @@ -73,7 +77,9 @@ define <1 x half> @exp10_v1f16(<1 x half> %x) {
; RV32IFD-NEXT: call __truncsfhf2
; RV32IFD-NEXT: fmv.x.w a0, fa0
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: .cfi_def_cfa_offset 0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: exp10_v1f16:
@@ -88,7 +94,9 @@ define <1 x half> @exp10_v1f16(<1 x half> %x) {
; RV64IFD-NEXT: call __truncsfhf2
; RV64IFD-NEXT: fmv.x.w a0, fa0
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: .cfi_restore ra
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: .cfi_def_cfa_offset 0
; RV64IFD-NEXT: ret
%r = call <1 x half> @llvm.exp10.v1f16(<1 x half> %x)
ret <1 x half> %r
@@ -120,7 +128,11 @@ define <2 x half> @exp10_v2f16(<2 x half> %x) {
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: .cfi_restore s0
; RV32IFD-NEXT: .cfi_restore fs0
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: .cfi_def_cfa_offset 0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: exp10_v2f16:
@@ -148,7 +160,11 @@ define <2 x half> @exp10_v2f16(<2 x half> %x) {
; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: .cfi_restore ra
; RV64IFD-NEXT: .cfi_restore s0
; RV64IFD-NEXT: .cfi_restore s1
; RV64IFD-NEXT: addi sp, sp, 32
; RV64IFD-NEXT: .cfi_def_cfa_offset 0
; RV64IFD-NEXT: ret
%r = call <2 x half> @llvm.exp10.v2f16(<2 x half> %x)
ret <2 x half> %r
@@ -205,7 +221,14 @@ define <3 x half> @exp10_v3f16(<3 x half> %x) {
; RV32IFD-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: fld fs1, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: fld fs2, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: .cfi_restore s0
; RV32IFD-NEXT: .cfi_restore s1
; RV32IFD-NEXT: .cfi_restore fs0
; RV32IFD-NEXT: .cfi_restore fs1
; RV32IFD-NEXT: .cfi_restore fs2
; RV32IFD-NEXT: addi sp, sp, 48
; RV32IFD-NEXT: .cfi_def_cfa_offset 0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: exp10_v3f16:
@@ -253,7 +276,13 @@ define <3 x half> @exp10_v3f16(<3 x half> %x) {
; RV64IFD-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs0, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: .cfi_restore ra
; RV64IFD-NEXT: .cfi_restore s0
; RV64IFD-NEXT: .cfi_restore s1
; RV64IFD-NEXT: .cfi_restore s2
; RV64IFD-NEXT: .cfi_restore fs0
; RV64IFD-NEXT: addi sp, sp, 48
; RV64IFD-NEXT: .cfi_def_cfa_offset 0
; RV64IFD-NEXT: ret
%r = call <3 x half> @llvm.exp10.v3f16(<3 x half> %x)
ret <3 x half> %r
@@ -326,7 +355,17 @@ define <4 x half> @exp10_v4f16(<4 x half> %x) {
; RV32IFD-NEXT: fld fs1, 24(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: fld fs2, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: fld fs3, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: .cfi_restore s0
; RV32IFD-NEXT: .cfi_restore s1
; RV32IFD-NEXT: .cfi_restore s2
; RV32IFD-NEXT: .cfi_restore s3
; RV32IFD-NEXT: .cfi_restore fs0
; RV32IFD-NEXT: .cfi_restore fs1
; RV32IFD-NEXT: .cfi_restore fs2
; RV32IFD-NEXT: .cfi_restore fs3
; RV32IFD-NEXT: addi sp, sp, 64
; RV32IFD-NEXT: .cfi_def_cfa_offset 0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: exp10_v4f16:
@@ -389,7 +428,16 @@ define <4 x half> @exp10_v4f16(<4 x half> %x) {
; RV64IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs2, 0(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: .cfi_restore ra
; RV64IFD-NEXT: .cfi_restore s0
; RV64IFD-NEXT: .cfi_restore s1
; RV64IFD-NEXT: .cfi_restore s2
; RV64IFD-NEXT: .cfi_restore s3
; RV64IFD-NEXT: .cfi_restore fs0
; RV64IFD-NEXT: .cfi_restore fs1
; RV64IFD-NEXT: .cfi_restore fs2
; RV64IFD-NEXT: addi sp, sp, 64
; RV64IFD-NEXT: .cfi_def_cfa_offset 0
; RV64IFD-NEXT: ret
%r = call <4 x half> @llvm.exp10.v4f16(<4 x half> %x)
ret <4 x half> %r
@@ -412,7 +460,9 @@ define <1 x float> @exp10_v1f32(<1 x float> %x) {
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: call exp10f
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: .cfi_def_cfa_offset 0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: exp10_v1f32:
@@ -423,7 +473,9 @@ define <1 x float> @exp10_v1f32(<1 x float> %x) {
; RV64IFD-NEXT: .cfi_offset ra, -8
; RV64IFD-NEXT: call exp10f
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: .cfi_restore ra
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: .cfi_def_cfa_offset 0
; RV64IFD-NEXT: ret
%r = call <1 x float> @llvm.exp10.v1f32(<1 x float> %x)
ret <1 x float> %r
Expand All @@ -450,7 +502,11 @@ define <2 x float> @exp10_v2f32(<2 x float> %x) {
; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: .cfi_restore fs0
; RV32IFD-NEXT: .cfi_restore fs1
; RV32IFD-NEXT: addi sp, sp, 32
; RV32IFD-NEXT: .cfi_def_cfa_offset 0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: exp10_v2f32:
@@ -473,7 +529,11 @@ define <2 x float> @exp10_v2f32(<2 x float> %x) {
; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: .cfi_restore ra
; RV64IFD-NEXT: .cfi_restore fs0
; RV64IFD-NEXT: .cfi_restore fs1
; RV64IFD-NEXT: addi sp, sp, 32
; RV64IFD-NEXT: .cfi_def_cfa_offset 0
; RV64IFD-NEXT: ret
%r = call <2 x float> @llvm.exp10.v2f32(<2 x float> %x)
ret <2 x float> %r
@@ -512,7 +572,13 @@ define <3 x float> @exp10_v3f32(<3 x float> %x) {
; RV32IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: fld fs2, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: .cfi_restore s0
; RV32IFD-NEXT: .cfi_restore fs0
; RV32IFD-NEXT: .cfi_restore fs1
; RV32IFD-NEXT: .cfi_restore fs2
; RV32IFD-NEXT: addi sp, sp, 32
; RV32IFD-NEXT: .cfi_def_cfa_offset 0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: exp10_v3f32:
@@ -551,7 +617,13 @@ define <3 x float> @exp10_v3f32(<3 x float> %x) {
; RV64IFD-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: .cfi_restore ra
; RV64IFD-NEXT: .cfi_restore s0
; RV64IFD-NEXT: .cfi_restore s1
; RV64IFD-NEXT: .cfi_restore fs0
; RV64IFD-NEXT: .cfi_restore fs1
; RV64IFD-NEXT: addi sp, sp, 48
; RV64IFD-NEXT: .cfi_def_cfa_offset 0
; RV64IFD-NEXT: ret
%r = call <3 x float> @llvm.exp10.v3f32(<3 x float> %x)
ret <3 x float> %r
@@ -598,7 +670,14 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) {
; RV32IFD-NEXT: fld fs1, 24(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: fld fs2, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: fld fs3, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: .cfi_restore s0
; RV32IFD-NEXT: .cfi_restore fs0
; RV32IFD-NEXT: .cfi_restore fs1
; RV32IFD-NEXT: .cfi_restore fs2
; RV32IFD-NEXT: .cfi_restore fs3
; RV32IFD-NEXT: addi sp, sp, 48
; RV32IFD-NEXT: .cfi_def_cfa_offset 0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: exp10_v4f32:
Expand Down Expand Up @@ -641,7 +720,14 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) {
; RV64IFD-NEXT: fld fs1, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs2, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs3, 0(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: .cfi_restore ra
; RV64IFD-NEXT: .cfi_restore s0
; RV64IFD-NEXT: .cfi_restore fs0
; RV64IFD-NEXT: .cfi_restore fs1
; RV64IFD-NEXT: .cfi_restore fs2
; RV64IFD-NEXT: .cfi_restore fs3
; RV64IFD-NEXT: addi sp, sp, 48
; RV64IFD-NEXT: .cfi_def_cfa_offset 0
; RV64IFD-NEXT: ret
%r = call <4 x float> @llvm.exp10.v4f32(<4 x float> %x)
ret <4 x float> %r
@@ -682,7 +768,11 @@ define <2 x double> @exp10_v2f64(<2 x double> %x) {
; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: .cfi_restore fs0
; RV32IFD-NEXT: .cfi_restore fs1
; RV32IFD-NEXT: addi sp, sp, 32
; RV32IFD-NEXT: .cfi_def_cfa_offset 0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: exp10_v2f64:
@@ -705,7 +795,11 @@ define <2 x double> @exp10_v2f64(<2 x double> %x) {
; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: .cfi_restore ra
; RV64IFD-NEXT: .cfi_restore fs0
; RV64IFD-NEXT: .cfi_restore fs1
; RV64IFD-NEXT: addi sp, sp, 32
; RV64IFD-NEXT: .cfi_def_cfa_offset 0
; RV64IFD-NEXT: ret
%r = call <2 x double> @llvm.exp10.v2f64(<2 x double> %x)
ret <2 x double> %r
@@ -744,7 +838,13 @@ define <3 x double> @exp10_v3f64(<3 x double> %x) {
; RV32IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: fld fs2, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: .cfi_restore s0
; RV32IFD-NEXT: .cfi_restore fs0
; RV32IFD-NEXT: .cfi_restore fs1
; RV32IFD-NEXT: .cfi_restore fs2
; RV32IFD-NEXT: addi sp, sp, 32
; RV32IFD-NEXT: .cfi_def_cfa_offset 0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: exp10_v3f64:
@@ -779,7 +879,13 @@ define <3 x double> @exp10_v3f64(<3 x double> %x) {
; RV64IFD-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs1, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs2, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: .cfi_restore ra
; RV64IFD-NEXT: .cfi_restore s0
; RV64IFD-NEXT: .cfi_restore fs0
; RV64IFD-NEXT: .cfi_restore fs1
; RV64IFD-NEXT: .cfi_restore fs2
; RV64IFD-NEXT: addi sp, sp, 48
; RV64IFD-NEXT: .cfi_def_cfa_offset 0
; RV64IFD-NEXT: ret
%r = call <3 x double> @llvm.exp10.v3f64(<3 x double> %x)
ret <3 x double> %r
@@ -826,7 +932,14 @@ define <4 x double> @exp10_v4f64(<4 x double> %x) {
; RV32IFD-NEXT: fld fs1, 24(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: fld fs2, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: fld fs3, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: .cfi_restore s0
; RV32IFD-NEXT: .cfi_restore fs0
; RV32IFD-NEXT: .cfi_restore fs1
; RV32IFD-NEXT: .cfi_restore fs2
; RV32IFD-NEXT: .cfi_restore fs3
; RV32IFD-NEXT: addi sp, sp, 48
; RV32IFD-NEXT: .cfi_def_cfa_offset 0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: exp10_v4f64:
@@ -869,7 +982,14 @@ define <4 x double> @exp10_v4f64(<4 x double> %x) {
; RV64IFD-NEXT: fld fs1, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs2, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs3, 0(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: .cfi_restore ra
; RV64IFD-NEXT: .cfi_restore s0
; RV64IFD-NEXT: .cfi_restore fs0
; RV64IFD-NEXT: .cfi_restore fs1
; RV64IFD-NEXT: .cfi_restore fs2
; RV64IFD-NEXT: .cfi_restore fs3
; RV64IFD-NEXT: addi sp, sp, 48
; RV64IFD-NEXT: .cfi_def_cfa_offset 0
; RV64IFD-NEXT: ret
%r = call <4 x double> @llvm.exp10.v4f64(<4 x double> %x)
ret <4 x double> %r
14 changes: 14 additions & 0 deletions llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll
@@ -20,6 +20,7 @@ define void @use_frame_base_reg() {
; RV32I-NEXT: lui a0, 24
; RV32I-NEXT: addi a0, a0, 1712
; RV32I-NEXT: add sp, sp, a0
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: use_frame_base_reg:
@@ -36,6 +37,7 @@ define void @use_frame_base_reg() {
; RV64I-NEXT: lui a0, 24
; RV64I-NEXT: addiw a0, a0, 1712
; RV64I-NEXT: add sp, sp, a0
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret

%va = alloca i8, align 4
@@ -59,6 +61,7 @@ define void @load_with_offset() {
; RV32I-NEXT: sb a1, 0(a0)
; RV32I-NEXT: addi sp, sp, 2032
; RV32I-NEXT: addi sp, sp, 480
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: load_with_offset:
@@ -71,6 +74,7 @@ define void @load_with_offset() {
; RV64I-NEXT: sb a1, 0(a0)
; RV64I-NEXT: addi sp, sp, 2032
; RV64I-NEXT: addi sp, sp, 480
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret

%va = alloca [2500 x i8], align 4
@@ -92,6 +96,7 @@ define void @load_with_offset2() {
; RV32I-NEXT: sb a0, 1412(sp)
; RV32I-NEXT: addi sp, sp, 2032
; RV32I-NEXT: addi sp, sp, 480
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: load_with_offset2:
@@ -103,6 +108,7 @@ define void @load_with_offset2() {
; RV64I-NEXT: sb a0, 1412(sp)
; RV64I-NEXT: addi sp, sp, 2032
; RV64I-NEXT: addi sp, sp, 480
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret

%va = alloca [2500 x i8], align 4
@@ -127,9 +133,13 @@ define void @frame_pointer() "frame-pointer"="all" {
; RV32I-NEXT: lbu a0, -1960(s0)
; RV32I-NEXT: sb a0, -1960(s0)
; RV32I-NEXT: addi sp, sp, 480
; RV32I-NEXT: .cfi_def_cfa sp, 2032
; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: .cfi_restore s0
; RV32I-NEXT: addi sp, sp, 2032
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: frame_pointer:
@@ -147,9 +157,13 @@ define void @frame_pointer() "frame-pointer"="all" {
; RV64I-NEXT: lbu a1, 0(a0)
; RV64I-NEXT: sb a1, 0(a0)
; RV64I-NEXT: addi sp, sp, 496
; RV64I-NEXT: .cfi_def_cfa sp, 2032
; RV64I-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: .cfi_restore s0
; RV64I-NEXT: addi sp, sp, 2032
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret

%va = alloca [2500 x i8], align 4
16 changes: 16 additions & 0 deletions llvm/test/CodeGen/RISCV/lpad.ll
@@ -144,14 +144,18 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_remember_state
; RV32-NEXT: .Ltmp0:
; RV32-NEXT: jalr a0
; RV32-NEXT: .Ltmp1:
; RV32-NEXT: .LBB2_1: # %try.cont
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
; RV32-NEXT: .LBB2_2: # %lpad
; RV32-NEXT: .cfi_restore_state
; RV32-NEXT: .Ltmp2:
; RV32-NEXT: j .LBB2_1
;
@@ -162,14 +166,18 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: .cfi_remember_state
; RV64-NEXT: .Ltmp0:
; RV64-NEXT: jalr a0
; RV64-NEXT: .Ltmp1:
; RV64-NEXT: .LBB2_1: # %try.cont
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
; RV64-NEXT: .LBB2_2: # %lpad
; RV64-NEXT: .cfi_restore_state
; RV64-NEXT: .Ltmp2:
; RV64-NEXT: j .LBB2_1
;
@@ -180,15 +188,19 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
; FIXED-ONE-RV32-NEXT: .cfi_def_cfa_offset 16
; FIXED-ONE-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; FIXED-ONE-RV32-NEXT: .cfi_offset ra, -4
; FIXED-ONE-RV32-NEXT: .cfi_remember_state
; FIXED-ONE-RV32-NEXT: .Ltmp0:
; FIXED-ONE-RV32-NEXT: lui t2, 1
; FIXED-ONE-RV32-NEXT: jalr a0
; FIXED-ONE-RV32-NEXT: .Ltmp1:
; FIXED-ONE-RV32-NEXT: .LBB2_1: # %try.cont
; FIXED-ONE-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; FIXED-ONE-RV32-NEXT: .cfi_restore ra
; FIXED-ONE-RV32-NEXT: addi sp, sp, 16
; FIXED-ONE-RV32-NEXT: .cfi_def_cfa_offset 0
; FIXED-ONE-RV32-NEXT: ret
; FIXED-ONE-RV32-NEXT: .LBB2_2: # %lpad
; FIXED-ONE-RV32-NEXT: .cfi_restore_state
; FIXED-ONE-RV32-NEXT: .Ltmp2:
; FIXED-ONE-RV32-NEXT: j .LBB2_1
;
@@ -199,15 +211,19 @@ define void @invoke(ptr %f) personality ptr @__gxx_personality_v0 {
; FIXED-ONE-RV64-NEXT: .cfi_def_cfa_offset 16
; FIXED-ONE-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; FIXED-ONE-RV64-NEXT: .cfi_offset ra, -8
; FIXED-ONE-RV64-NEXT: .cfi_remember_state
; FIXED-ONE-RV64-NEXT: .Ltmp0:
; FIXED-ONE-RV64-NEXT: lui t2, 1
; FIXED-ONE-RV64-NEXT: jalr a0
; FIXED-ONE-RV64-NEXT: .Ltmp1:
; FIXED-ONE-RV64-NEXT: .LBB2_1: # %try.cont
; FIXED-ONE-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; FIXED-ONE-RV64-NEXT: .cfi_restore ra
; FIXED-ONE-RV64-NEXT: addi sp, sp, 16
; FIXED-ONE-RV64-NEXT: .cfi_def_cfa_offset 0
; FIXED-ONE-RV64-NEXT: ret
; FIXED-ONE-RV64-NEXT: .LBB2_2: # %lpad
; FIXED-ONE-RV64-NEXT: .cfi_restore_state
; FIXED-ONE-RV64-NEXT: .Ltmp2:
; FIXED-ONE-RV64-NEXT: j .LBB2_1
entry:
Expand Down
7 changes: 7 additions & 0 deletions llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll
@@ -24,6 +24,7 @@ define signext i32 @foo() #1 personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: .cfi_offset s1, -24
; CHECK-NEXT: addi s0, sp, 32
; CHECK-NEXT: .cfi_def_cfa s0, 0
; CHECK-NEXT: .cfi_remember_state
; CHECK-NEXT: .Ltmp0:
; CHECK-NEXT: addi sp, sp, -32
; CHECK-NEXT: li a0, 0
@@ -49,12 +50,18 @@ define signext i32 @foo() #1 personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: call __cxa_end_catch
; CHECK-NEXT: mv a0, s1
; CHECK-NEXT: addi sp, s0, -32
; CHECK-NEXT: .cfi_def_cfa sp, 32
; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: .cfi_restore ra
; CHECK-NEXT: .cfi_restore s0
; CHECK-NEXT: .cfi_restore s1
; CHECK-NEXT: addi sp, sp, 32
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB0_4: # %ehcleanup
; CHECK-NEXT: .cfi_restore_state
; CHECK-NEXT: call _Unwind_Resume
entry:
invoke void @_Z3fooiiiiiiiiiiPi(i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 poison, i32 poison, i32 poison)
60 changes: 60 additions & 0 deletions llvm/test/CodeGen/RISCV/nontemporal.ll
@@ -957,7 +957,10 @@ define void @test_nontemporal_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV64-NEXT: sb a5, 3(a0)
; CHECK-RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
; CHECK-RV64-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
; CHECK-RV64-NEXT: .cfi_restore s0
; CHECK-RV64-NEXT: .cfi_restore s1
; CHECK-RV64-NEXT: addi sp, sp, 16
; CHECK-RV64-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV64-NEXT: ret
;
; CHECK-RV32-LABEL: test_nontemporal_store_v16i8:
@@ -1018,7 +1021,10 @@ define void @test_nontemporal_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV32-NEXT: sb a5, 3(a0)
; CHECK-RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; CHECK-RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; CHECK-RV32-NEXT: .cfi_restore s0
; CHECK-RV32-NEXT: .cfi_restore s1
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64C-LABEL: test_nontemporal_store_v16i8:
@@ -1079,7 +1085,10 @@ define void @test_nontemporal_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV64C-NEXT: sb t1, 3(a0)
; CHECK-RV64C-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
; CHECK-RV64C-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
; CHECK-RV64C-NEXT: .cfi_restore s0
; CHECK-RV64C-NEXT: .cfi_restore s1
; CHECK-RV64C-NEXT: addi sp, sp, 16
; CHECK-RV64C-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV64C-NEXT: ret
;
; CHECK-RV32C-LABEL: test_nontemporal_store_v16i8:
@@ -1140,7 +1149,10 @@ define void @test_nontemporal_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV32C-NEXT: sb t1, 3(a0)
; CHECK-RV32C-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; CHECK-RV32C-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; CHECK-RV32C-NEXT: .cfi_restore s0
; CHECK-RV32C-NEXT: .cfi_restore s1
; CHECK-RV32C-NEXT: addi sp, sp, 16
; CHECK-RV32C-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32C-NEXT: ret
;
; CHECK-RV64V-LABEL: test_nontemporal_store_v16i8:
@@ -2371,7 +2383,10 @@ define void @test_nontemporal_P1_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV64-NEXT: sb a5, 3(a0)
; CHECK-RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
; CHECK-RV64-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
; CHECK-RV64-NEXT: .cfi_restore s0
; CHECK-RV64-NEXT: .cfi_restore s1
; CHECK-RV64-NEXT: addi sp, sp, 16
; CHECK-RV64-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV64-NEXT: ret
;
; CHECK-RV32-LABEL: test_nontemporal_P1_store_v16i8:
@@ -2432,7 +2447,10 @@ define void @test_nontemporal_P1_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV32-NEXT: sb a5, 3(a0)
; CHECK-RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; CHECK-RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; CHECK-RV32-NEXT: .cfi_restore s0
; CHECK-RV32-NEXT: .cfi_restore s1
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64C-LABEL: test_nontemporal_P1_store_v16i8:
@@ -2493,7 +2511,10 @@ define void @test_nontemporal_P1_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV64C-NEXT: sb t1, 3(a0)
; CHECK-RV64C-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
; CHECK-RV64C-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
; CHECK-RV64C-NEXT: .cfi_restore s0
; CHECK-RV64C-NEXT: .cfi_restore s1
; CHECK-RV64C-NEXT: addi sp, sp, 16
; CHECK-RV64C-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV64C-NEXT: ret
;
; CHECK-RV32C-LABEL: test_nontemporal_P1_store_v16i8:
@@ -2554,7 +2575,10 @@ define void @test_nontemporal_P1_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV32C-NEXT: sb t1, 3(a0)
; CHECK-RV32C-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; CHECK-RV32C-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; CHECK-RV32C-NEXT: .cfi_restore s0
; CHECK-RV32C-NEXT: .cfi_restore s1
; CHECK-RV32C-NEXT: addi sp, sp, 16
; CHECK-RV32C-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32C-NEXT: ret
;
; CHECK-RV64V-LABEL: test_nontemporal_P1_store_v16i8:
@@ -3785,7 +3809,10 @@ define void @test_nontemporal_PALL_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV64-NEXT: sb a5, 3(a0)
; CHECK-RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
; CHECK-RV64-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
; CHECK-RV64-NEXT: .cfi_restore s0
; CHECK-RV64-NEXT: .cfi_restore s1
; CHECK-RV64-NEXT: addi sp, sp, 16
; CHECK-RV64-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV64-NEXT: ret
;
; CHECK-RV32-LABEL: test_nontemporal_PALL_store_v16i8:
@@ -3846,7 +3873,10 @@ define void @test_nontemporal_PALL_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV32-NEXT: sb a5, 3(a0)
; CHECK-RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; CHECK-RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; CHECK-RV32-NEXT: .cfi_restore s0
; CHECK-RV32-NEXT: .cfi_restore s1
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64C-LABEL: test_nontemporal_PALL_store_v16i8:
@@ -3907,7 +3937,10 @@ define void @test_nontemporal_PALL_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV64C-NEXT: sb t1, 3(a0)
; CHECK-RV64C-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
; CHECK-RV64C-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
; CHECK-RV64C-NEXT: .cfi_restore s0
; CHECK-RV64C-NEXT: .cfi_restore s1
; CHECK-RV64C-NEXT: addi sp, sp, 16
; CHECK-RV64C-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV64C-NEXT: ret
;
; CHECK-RV32C-LABEL: test_nontemporal_PALL_store_v16i8:
@@ -3968,7 +4001,10 @@ define void @test_nontemporal_PALL_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV32C-NEXT: sb t1, 3(a0)
; CHECK-RV32C-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; CHECK-RV32C-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; CHECK-RV32C-NEXT: .cfi_restore s0
; CHECK-RV32C-NEXT: .cfi_restore s1
; CHECK-RV32C-NEXT: addi sp, sp, 16
; CHECK-RV32C-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32C-NEXT: ret
;
; CHECK-RV64V-LABEL: test_nontemporal_PALL_store_v16i8:
@@ -5199,7 +5235,10 @@ define void @test_nontemporal_S1_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV64-NEXT: sb a5, 3(a0)
; CHECK-RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
; CHECK-RV64-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
; CHECK-RV64-NEXT: .cfi_restore s0
; CHECK-RV64-NEXT: .cfi_restore s1
; CHECK-RV64-NEXT: addi sp, sp, 16
; CHECK-RV64-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV64-NEXT: ret
;
; CHECK-RV32-LABEL: test_nontemporal_S1_store_v16i8:
@@ -5260,7 +5299,10 @@ define void @test_nontemporal_S1_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV32-NEXT: sb a5, 3(a0)
; CHECK-RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; CHECK-RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; CHECK-RV32-NEXT: .cfi_restore s0
; CHECK-RV32-NEXT: .cfi_restore s1
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64C-LABEL: test_nontemporal_S1_store_v16i8:
@@ -5321,7 +5363,10 @@ define void @test_nontemporal_S1_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV64C-NEXT: sb t1, 3(a0)
; CHECK-RV64C-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
; CHECK-RV64C-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
; CHECK-RV64C-NEXT: .cfi_restore s0
; CHECK-RV64C-NEXT: .cfi_restore s1
; CHECK-RV64C-NEXT: addi sp, sp, 16
; CHECK-RV64C-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV64C-NEXT: ret
;
; CHECK-RV32C-LABEL: test_nontemporal_S1_store_v16i8:
@@ -5382,7 +5427,10 @@ define void @test_nontemporal_S1_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV32C-NEXT: sb t1, 3(a0)
; CHECK-RV32C-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; CHECK-RV32C-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; CHECK-RV32C-NEXT: .cfi_restore s0
; CHECK-RV32C-NEXT: .cfi_restore s1
; CHECK-RV32C-NEXT: addi sp, sp, 16
; CHECK-RV32C-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32C-NEXT: ret
;
; CHECK-RV64V-LABEL: test_nontemporal_S1_store_v16i8:
@@ -6613,7 +6661,10 @@ define void @test_nontemporal_ALL_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV64-NEXT: sb a5, 3(a0)
; CHECK-RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
; CHECK-RV64-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
; CHECK-RV64-NEXT: .cfi_restore s0
; CHECK-RV64-NEXT: .cfi_restore s1
; CHECK-RV64-NEXT: addi sp, sp, 16
; CHECK-RV64-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV64-NEXT: ret
;
; CHECK-RV32-LABEL: test_nontemporal_ALL_store_v16i8:
@@ -6674,7 +6725,10 @@ define void @test_nontemporal_ALL_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV32-NEXT: sb a5, 3(a0)
; CHECK-RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; CHECK-RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; CHECK-RV32-NEXT: .cfi_restore s0
; CHECK-RV32-NEXT: .cfi_restore s1
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64C-LABEL: test_nontemporal_ALL_store_v16i8:
@@ -6735,7 +6789,10 @@ define void @test_nontemporal_ALL_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV64C-NEXT: sb t1, 3(a0)
; CHECK-RV64C-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
; CHECK-RV64C-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
; CHECK-RV64C-NEXT: .cfi_restore s0
; CHECK-RV64C-NEXT: .cfi_restore s1
; CHECK-RV64C-NEXT: addi sp, sp, 16
; CHECK-RV64C-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV64C-NEXT: ret
;
; CHECK-RV32C-LABEL: test_nontemporal_ALL_store_v16i8:
@@ -6796,7 +6853,10 @@ define void @test_nontemporal_ALL_store_v16i8(ptr %p, <16 x i8> %v) {
; CHECK-RV32C-NEXT: sb t1, 3(a0)
; CHECK-RV32C-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; CHECK-RV32C-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; CHECK-RV32C-NEXT: .cfi_restore s0
; CHECK-RV32C-NEXT: .cfi_restore s1
; CHECK-RV32C-NEXT: addi sp, sp, 16
; CHECK-RV32C-NEXT: .cfi_def_cfa_offset 0
; CHECK-RV32C-NEXT: ret
;
; CHECK-RV64V-LABEL: test_nontemporal_ALL_store_v16i8:
23 changes: 23 additions & 0 deletions llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
@@ -457,7 +457,11 @@ define i64 @uaddo6_xor_multi_use(i64 %a, i64 %b) {
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: .cfi_restore s1
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: uaddo6_xor_multi_use:
@@ -478,7 +482,10 @@ define i64 @uaddo6_xor_multi_use(i64 %a, i64 %b) {
; RV64-NEXT: mv a0, s0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
%x = xor i64 -1, %a
%cmp = icmp ult i64 %x, %b
@@ -1117,7 +1124,16 @@ define i1 @usubo_ult_cmp_dominates_i64(i64 %x, i64 %y, ptr %p, i1 %cond) {
; RV32-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: .cfi_restore s1
; RV32-NEXT: .cfi_restore s2
; RV32-NEXT: .cfi_restore s3
; RV32-NEXT: .cfi_restore s4
; RV32-NEXT: .cfi_restore s5
; RV32-NEXT: .cfi_restore s6
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: usubo_ult_cmp_dominates_i64:
@@ -1161,7 +1177,14 @@ define i1 @usubo_ult_cmp_dominates_i64(i64 %x, i64 %y, ptr %p, i1 %cond) {
; RV64-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s3, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s4, 0(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: .cfi_restore s1
; RV64-NEXT: .cfi_restore s2
; RV64-NEXT: .cfi_restore s3
; RV64-NEXT: .cfi_restore s4
; RV64-NEXT: addi sp, sp, 48
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
br i1 %cond, label %t, label %f
1 change: 1 addition & 0 deletions llvm/test/CodeGen/RISCV/pr58025.ll
@@ -9,6 +9,7 @@ define void @f() {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
BB:
%B = fdiv <1 x float> <float 0.5>, <float 0.5>
4 changes: 4 additions & 0 deletions llvm/test/CodeGen/RISCV/pr58286.ll
@@ -47,6 +47,7 @@ define void @func() {
; RV64I-NEXT: lui a0, 1
; RV64I-NEXT: addiw a0, a0, 16
; RV64I-NEXT: add sp, sp, a0
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
;
; RV32I-LABEL: func:
@@ -92,6 +93,7 @@ define void @func() {
; RV32I-NEXT: lui a0, 1
; RV32I-NEXT: addi a0, a0, 16
; RV32I-NEXT: add sp, sp, a0
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
%space = alloca i32, align 4
%stackspace = alloca[1024 x i32], align 4
@@ -180,6 +182,7 @@ define void @shrink_wrap(i1 %c) {
; RV64I-NEXT: lui a0, 1
; RV64I-NEXT: addiw a0, a0, 16
; RV64I-NEXT: add sp, sp, a0
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: .LBB1_2: # %foo
; RV64I-NEXT: ret
;
@@ -229,6 +232,7 @@ define void @shrink_wrap(i1 %c) {
; RV32I-NEXT: lui a0, 1
; RV32I-NEXT: addi a0, a0, 16
; RV32I-NEXT: add sp, sp, a0
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: .LBB1_2: # %foo
; RV32I-NEXT: ret
%space = alloca i32, align 4
1 change: 1 addition & 0 deletions llvm/test/CodeGen/RISCV/pr63365.ll
@@ -14,6 +14,7 @@ define void @f() {
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
BB:
%A1 = alloca ptr, align 8
29 changes: 29 additions & 0 deletions llvm/test/CodeGen/RISCV/pr69586.ll
@@ -763,6 +763,7 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: li a1, 6
; NOREMAT-NEXT: mul a0, a0, a1
; NOREMAT-NEXT: add sp, sp, a0
; NOREMAT-NEXT: .cfi_def_cfa sp, 400
; NOREMAT-NEXT: ld ra, 392(sp) # 8-byte Folded Reload
; NOREMAT-NEXT: ld s0, 384(sp) # 8-byte Folded Reload
; NOREMAT-NEXT: ld s1, 376(sp) # 8-byte Folded Reload
@@ -776,7 +777,21 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; NOREMAT-NEXT: ld s9, 312(sp) # 8-byte Folded Reload
; NOREMAT-NEXT: ld s10, 304(sp) # 8-byte Folded Reload
; NOREMAT-NEXT: ld s11, 296(sp) # 8-byte Folded Reload
; NOREMAT-NEXT: .cfi_restore ra
; NOREMAT-NEXT: .cfi_restore s0
; NOREMAT-NEXT: .cfi_restore s1
; NOREMAT-NEXT: .cfi_restore s2
; NOREMAT-NEXT: .cfi_restore s3
; NOREMAT-NEXT: .cfi_restore s4
; NOREMAT-NEXT: .cfi_restore s5
; NOREMAT-NEXT: .cfi_restore s6
; NOREMAT-NEXT: .cfi_restore s7
; NOREMAT-NEXT: .cfi_restore s8
; NOREMAT-NEXT: .cfi_restore s9
; NOREMAT-NEXT: .cfi_restore s10
; NOREMAT-NEXT: .cfi_restore s11
; NOREMAT-NEXT: addi sp, sp, 400
; NOREMAT-NEXT: .cfi_def_cfa_offset 0
; NOREMAT-NEXT: ret
;
; REMAT-LABEL: test:
Expand Down Expand Up @@ -1533,7 +1548,21 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; REMAT-NEXT: ld s9, 24(sp) # 8-byte Folded Reload
; REMAT-NEXT: ld s10, 16(sp) # 8-byte Folded Reload
; REMAT-NEXT: ld s11, 8(sp) # 8-byte Folded Reload
; REMAT-NEXT: .cfi_restore ra
; REMAT-NEXT: .cfi_restore s0
; REMAT-NEXT: .cfi_restore s1
; REMAT-NEXT: .cfi_restore s2
; REMAT-NEXT: .cfi_restore s3
; REMAT-NEXT: .cfi_restore s4
; REMAT-NEXT: .cfi_restore s5
; REMAT-NEXT: .cfi_restore s6
; REMAT-NEXT: .cfi_restore s7
; REMAT-NEXT: .cfi_restore s8
; REMAT-NEXT: .cfi_restore s9
; REMAT-NEXT: .cfi_restore s10
; REMAT-NEXT: .cfi_restore s11
; REMAT-NEXT: addi sp, sp, 112
; REMAT-NEXT: .cfi_def_cfa_offset 0
; REMAT-NEXT: ret
%4 = tail call i64 @llvm.riscv.vsetvli.i64(i64 32, i64 2, i64 1)
%5 = tail call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.i64(<vscale x 4 x i32> poison, ptr %0, i64 %4)
3 changes: 3 additions & 0 deletions llvm/test/CodeGen/RISCV/pr88365.ll
@@ -15,8 +15,11 @@ define void @foo() {
; CHECK-NEXT: call use
; CHECK-NEXT: li a0, -2048
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: .cfi_def_cfa_offset 2032
; CHECK-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; CHECK-NEXT: .cfi_restore ra
; CHECK-NEXT: addi sp, sp, 2032
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%1 = alloca [1073741818 x i32], align 4
call void @use(ptr %1)
80 changes: 80 additions & 0 deletions llvm/test/CodeGen/RISCV/prolog-epilogue.ll
@@ -20,7 +20,9 @@ define void @frame_16b() {
; RV32-NEXT: li a0, 0
; RV32-NEXT: call callee
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: frame_16b:
@@ -32,7 +34,9 @@ define void @frame_16b() {
; RV64-NEXT: li a0, 0
; RV64-NEXT: call callee
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
call void @callee(ptr null)
ret void
@@ -48,7 +52,9 @@ define void @frame_1024b() {
; RV32-NEXT: addi a0, sp, 12
; RV32-NEXT: call callee
; RV32-NEXT: lw ra, 1020(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 1024
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: frame_1024b:
@@ -60,7 +66,9 @@ define void @frame_1024b() {
; RV64-NEXT: addi a0, sp, 8
; RV64-NEXT: call callee
; RV64-NEXT: ld ra, 1016(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 1024
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
%a = alloca [1008 x i8]
call void @callee(ptr %a)
@@ -79,8 +87,11 @@ define void @frame_2048b() {
; RV32-NEXT: addi a0, sp, 12
; RV32-NEXT: call callee
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 2032
; RV32-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 2032
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: frame_2048b:
@@ -94,8 +105,11 @@ define void @frame_2048b() {
; RV64-NEXT: addi a0, sp, 8
; RV64-NEXT: call callee
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 2032
; RV64-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 2032
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
%a = alloca [2032 x i8]
call void @callee(ptr %a)
@@ -116,8 +130,11 @@ define void @frame_4096b() {
; RV32-NEXT: call callee
; RV32-NEXT: addi sp, sp, 2032
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: .cfi_def_cfa_offset 2032
; RV32-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 2032
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: frame_4096b:
@@ -133,8 +150,11 @@ define void @frame_4096b() {
; RV64-NEXT: call callee
; RV64-NEXT: addi sp, sp, 2032
; RV64-NEXT: addi sp, sp, 32
; RV64-NEXT: .cfi_def_cfa_offset 2032
; RV64-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 2032
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
%a = alloca [4080 x i8]
call void @callee(ptr %a)
@@ -156,8 +176,11 @@ define void @frame_4kb() {
; RV32-NEXT: call callee
; RV32-NEXT: lui a0, 1
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: .cfi_def_cfa_offset 2032
; RV32-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 2032
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: frame_4kb:
@@ -173,8 +196,11 @@ define void @frame_4kb() {
; RV64-NEXT: call callee
; RV64-NEXT: lui a0, 1
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: .cfi_def_cfa_offset 2032
; RV64-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 2032
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
%a = alloca [6112 x i8]
call void @callee(ptr %a)
@@ -197,8 +223,11 @@ define void @frame_4kb_offset_128() {
; RV32I-NEXT: lui a0, 1
; RV32I-NEXT: addi a0, a0, 128
; RV32I-NEXT: add sp, sp, a0
; RV32I-NEXT: .cfi_def_cfa_offset 2032
; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: addi sp, sp, 2032
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV32ZBA-LABEL: frame_4kb_offset_128:
@@ -214,8 +243,11 @@ define void @frame_4kb_offset_128() {
; RV32ZBA-NEXT: call callee
; RV32ZBA-NEXT: li a0, 528
; RV32ZBA-NEXT: sh3add sp, a0, sp
; RV32ZBA-NEXT: .cfi_def_cfa_offset 2032
; RV32ZBA-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT: .cfi_restore ra
; RV32ZBA-NEXT: addi sp, sp, 2032
; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
; RV32ZBA-NEXT: ret
;
; RV64I-LABEL: frame_4kb_offset_128:
@@ -233,8 +265,11 @@ define void @frame_4kb_offset_128() {
; RV64I-NEXT: lui a0, 1
; RV64I-NEXT: addiw a0, a0, 128
; RV64I-NEXT: add sp, sp, a0
; RV64I-NEXT: .cfi_def_cfa_offset 2032
; RV64I-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: addi sp, sp, 2032
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: frame_4kb_offset_128:
@@ -250,8 +285,11 @@ define void @frame_4kb_offset_128() {
; RV64ZBA-NEXT: call callee
; RV64ZBA-NEXT: li a0, 528
; RV64ZBA-NEXT: sh3add sp, a0, sp
; RV64ZBA-NEXT: .cfi_def_cfa_offset 2032
; RV64ZBA-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
; RV64ZBA-NEXT: .cfi_restore ra
; RV64ZBA-NEXT: addi sp, sp, 2032
; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
; RV64ZBA-NEXT: ret
%a = alloca [6240 x i8]
call void @callee(ptr %a)
@@ -274,8 +312,11 @@ define void @frame_8kb() {
; RV32-NEXT: call callee
; RV32-NEXT: lui a0, 2
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: .cfi_def_cfa_offset 2032
; RV32-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 2032
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: frame_8kb:
@@ -291,8 +332,11 @@ define void @frame_8kb() {
; RV64-NEXT: call callee
; RV64-NEXT: lui a0, 2
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: .cfi_def_cfa_offset 2032
; RV64-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 2032
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
%a = alloca [10208 x i8]
call void @callee(ptr %a)
@@ -315,8 +359,11 @@ define void @frame_8kb_offset_128() {
; RV32I-NEXT: lui a0, 2
; RV32I-NEXT: addi a0, a0, 128
; RV32I-NEXT: add sp, sp, a0
; RV32I-NEXT: .cfi_def_cfa_offset 2032
; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: addi sp, sp, 2032
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV32ZBA-LABEL: frame_8kb_offset_128:
@@ -332,8 +379,11 @@ define void @frame_8kb_offset_128() {
; RV32ZBA-NEXT: call callee
; RV32ZBA-NEXT: li a0, 1040
; RV32ZBA-NEXT: sh3add sp, a0, sp
; RV32ZBA-NEXT: .cfi_def_cfa_offset 2032
; RV32ZBA-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT: .cfi_restore ra
; RV32ZBA-NEXT: addi sp, sp, 2032
; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
; RV32ZBA-NEXT: ret
;
; RV64I-LABEL: frame_8kb_offset_128:
@@ -351,8 +401,11 @@ define void @frame_8kb_offset_128() {
; RV64I-NEXT: lui a0, 2
; RV64I-NEXT: addiw a0, a0, 128
; RV64I-NEXT: add sp, sp, a0
; RV64I-NEXT: .cfi_def_cfa_offset 2032
; RV64I-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: addi sp, sp, 2032
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: frame_8kb_offset_128:
@@ -368,8 +421,11 @@ define void @frame_8kb_offset_128() {
; RV64ZBA-NEXT: call callee
; RV64ZBA-NEXT: li a0, 1040
; RV64ZBA-NEXT: sh3add sp, a0, sp
; RV64ZBA-NEXT: .cfi_def_cfa_offset 2032
; RV64ZBA-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
; RV64ZBA-NEXT: .cfi_restore ra
; RV64ZBA-NEXT: addi sp, sp, 2032
; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
; RV64ZBA-NEXT: ret
%a = alloca [10336 x i8]
call void @callee(ptr %a)
@@ -392,8 +448,11 @@ define void @frame_16kb_minus_80() {
; RV32I-NEXT: lui a0, 4
; RV32I-NEXT: addi a0, a0, -80
; RV32I-NEXT: add sp, sp, a0
; RV32I-NEXT: .cfi_def_cfa_offset 2032
; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: addi sp, sp, 2032
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV32ZBA-LABEL: frame_16kb_minus_80:
@@ -409,8 +468,11 @@ define void @frame_16kb_minus_80() {
; RV32ZBA-NEXT: call callee
; RV32ZBA-NEXT: li a0, 2038
; RV32ZBA-NEXT: sh3add sp, a0, sp
; RV32ZBA-NEXT: .cfi_def_cfa_offset 2032
; RV32ZBA-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT: .cfi_restore ra
; RV32ZBA-NEXT: addi sp, sp, 2032
; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
; RV32ZBA-NEXT: ret
;
; RV64I-LABEL: frame_16kb_minus_80:
@@ -428,8 +490,11 @@ define void @frame_16kb_minus_80() {
; RV64I-NEXT: lui a0, 4
; RV64I-NEXT: addiw a0, a0, -80
; RV64I-NEXT: add sp, sp, a0
; RV64I-NEXT: .cfi_def_cfa_offset 2032
; RV64I-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: addi sp, sp, 2032
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: frame_16kb_minus_80:
@@ -445,8 +510,11 @@ define void @frame_16kb_minus_80() {
; RV64ZBA-NEXT: call callee
; RV64ZBA-NEXT: li a0, 2038
; RV64ZBA-NEXT: sh3add sp, a0, sp
; RV64ZBA-NEXT: .cfi_def_cfa_offset 2032
; RV64ZBA-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
; RV64ZBA-NEXT: .cfi_restore ra
; RV64ZBA-NEXT: addi sp, sp, 2032
; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
; RV64ZBA-NEXT: ret
%a = alloca [18320 x i8]
call void @callee(ptr %a)
@@ -468,8 +536,11 @@ define void @frame_16kb() {
; RV32-NEXT: call callee
; RV32-NEXT: lui a0, 4
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: .cfi_def_cfa_offset 2032
; RV32-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 2032
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: frame_16kb:
@@ -485,8 +556,11 @@ define void @frame_16kb() {
; RV64-NEXT: call callee
; RV64-NEXT: lui a0, 4
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: .cfi_def_cfa_offset 2032
; RV64-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 2032
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
%a = alloca [18400 x i8]
call void @callee(ptr %a)
@@ -508,8 +582,11 @@ define void @frame_32kb() {
; RV32-NEXT: call callee
; RV32-NEXT: lui a0, 8
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: .cfi_def_cfa_offset 2032
; RV32-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 2032
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: frame_32kb:
@@ -525,8 +602,11 @@ define void @frame_32kb() {
; RV64-NEXT: call callee
; RV64-NEXT: lui a0, 8
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: .cfi_def_cfa_offset 2032
; RV64-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 2032
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
%a = alloca [34784 x i8]
call void @callee(ptr %a)
4 changes: 4 additions & 0 deletions llvm/test/CodeGen/RISCV/push-pop-opt-crash.ll
@@ -22,6 +22,8 @@ define dso_local void @f0() local_unnamed_addr {
; RV32IZCMP-NEXT: .cfi_offset ra, -4
; RV32IZCMP-NEXT: call f1
; RV32IZCMP-NEXT: cm.pop {ra}, 16
; RV32IZCMP-NEXT: .cfi_restore ra
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 0
; RV32IZCMP-NEXT: .LBB0_2: # %if.F
; RV32IZCMP-NEXT: tail f2
; RV32IZCMP-NEXT: .Lfunc_end0:
@@ -36,6 +38,8 @@ define dso_local void @f0() local_unnamed_addr {
; RV64IZCMP-NEXT: .cfi_offset ra, -8
; RV64IZCMP-NEXT: call f1
; RV64IZCMP-NEXT: cm.pop {ra}, 16
; RV64IZCMP-NEXT: .cfi_restore ra
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 0
; RV64IZCMP-NEXT: .LBB0_2: # %if.F
; RV64IZCMP-NEXT: tail f2
; RV64IZCMP-NEXT: .Lfunc_end0:
182 changes: 178 additions & 4 deletions llvm/test/CodeGen/RISCV/push-pop-popret.ll

Large diffs are not rendered by default.

@@ -62,9 +62,13 @@ define void @last_chance_recoloring_failure() {
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: .cfi_def_cfa sp, 32
; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; CHECK-NEXT: .cfi_restore ra
; CHECK-NEXT: .cfi_restore s0
; CHECK-NEXT: addi sp, sp, 32
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; SUBREGLIVENESS-LABEL: last_chance_recoloring_failure:
Expand Down Expand Up @@ -118,9 +122,13 @@ define void @last_chance_recoloring_failure() {
; SUBREGLIVENESS-NEXT: csrr a0, vlenb
; SUBREGLIVENESS-NEXT: slli a0, a0, 4
; SUBREGLIVENESS-NEXT: add sp, sp, a0
; SUBREGLIVENESS-NEXT: .cfi_def_cfa sp, 32
; SUBREGLIVENESS-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; SUBREGLIVENESS-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; SUBREGLIVENESS-NEXT: .cfi_restore ra
; SUBREGLIVENESS-NEXT: .cfi_restore s0
; SUBREGLIVENESS-NEXT: addi sp, sp, 32
; SUBREGLIVENESS-NEXT: .cfi_def_cfa_offset 0
; SUBREGLIVENESS-NEXT: ret
entry:
%i = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr nonnull poison, <vscale x 16 x i32> poison, i64 55, i64 4)
4 changes: 3 additions & 1 deletion llvm/test/CodeGen/RISCV/rv64-patchpoint.ll
@@ -35,7 +35,10 @@ define i64 @trivial_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
; CHECK-NEXT: mv a0, s1
; CHECK-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
; CHECK-NEXT: .cfi_restore s0
; CHECK-NEXT: .cfi_restore s1
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
%resolveCall2 = inttoptr i64 244837814094590 to i8*
@@ -49,7 +52,6 @@ entry:
define void @small_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
; CHECK-LABEL: small_patchpoint_codegen:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: .Ltmp2:
; CHECK-NEXT: nop
; CHECK-NEXT: nop
1 change: 0 additions & 1 deletion llvm/test/CodeGen/RISCV/rv64-stackmap-nops.ll
@@ -4,7 +4,6 @@
define void @test_shadow_optimization() {
; CHECK-LABEL: test_shadow_optimization:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: .Ltmp0:
; CHECK-NEXT: nop
; CHECK-NEXT: nop
21 changes: 21 additions & 0 deletions llvm/test/CodeGen/RISCV/rv64-statepoint-call-lowering.ll
@@ -25,7 +25,9 @@ define i1 @test_i1_return() gc "statepoint-example" {
; CHECK-NEXT: call return_i1
; CHECK-NEXT: .Ltmp0:
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: .cfi_restore ra
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
; This is just checking that a i1 gets lowered normally when there's no extra
; state arguments to the statepoint
@@ -45,7 +47,9 @@ define i32 @test_i32_return() gc "statepoint-example" {
; CHECK-NEXT: call return_i32
; CHECK-NEXT: .Ltmp1:
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: .cfi_restore ra
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
%safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i32 ()) @return_i32, i32 0, i32 0, i32 0, i32 0)
@@ -63,7 +67,9 @@ define ptr @test_i32ptr_return() gc "statepoint-example" {
; CHECK-NEXT: call return_i32ptr
; CHECK-NEXT: .Ltmp2:
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: .cfi_restore ra
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
%safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(ptr ()) @return_i32ptr, i32 0, i32 0, i32 0, i32 0)
@@ -81,7 +87,9 @@ define float @test_float_return() gc "statepoint-example" {
; CHECK-NEXT: call return_float
; CHECK-NEXT: .Ltmp3:
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: .cfi_restore ra
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
%safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(float ()) @return_float, i32 0, i32 0, i32 0, i32 0)
@@ -99,7 +107,9 @@ define %struct @test_struct_return() gc "statepoint-example" {
; CHECK-NEXT: call return_struct
; CHECK-NEXT: .Ltmp4:
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: .cfi_restore ra
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
%safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(%struct ()) @return_struct, i32 0, i32 0, i32 0, i32 0)
@@ -118,7 +128,9 @@ define i1 @test_relocate(ptr addrspace(1) %a) gc "statepoint-example" {
; CHECK-NEXT: call return_i1
; CHECK-NEXT: .Ltmp5:
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: .cfi_restore ra
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
; Check that an ununsed relocate has no code-generation impact
entry:
@@ -140,7 +152,9 @@ define void @test_void_vararg() gc "statepoint-example" {
; CHECK-NEXT: call varargf
; CHECK-NEXT: .Ltmp6:
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: .cfi_restore ra
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
; Check a statepoint wrapping a *ptr returning vararg function works
entry:
@@ -160,7 +174,9 @@ define i1 @test_i1_return_patchable() gc "statepoint-example" {
; CHECK-NEXT: nop
; CHECK-NEXT: .Ltmp7:
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: .cfi_restore ra
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
; A patchable variant of test_i1_return
entry:
@@ -197,7 +213,10 @@ define i1 @test_cross_bb(ptr addrspace(1) %a, i1 %external_cond) gc "statepoint-
; CHECK-NEXT: .LBB8_3: # %right
; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; CHECK-NEXT: .cfi_restore ra
; CHECK-NEXT: .cfi_restore s0
; CHECK-NEXT: addi sp, sp, 32
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
%safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %a)]
@@ -237,7 +256,9 @@ define void @test_attributes(ptr byval(%struct2) %s) gc "statepoint-example" {
; CHECK-NEXT: call consume_attributes
; CHECK-NEXT: .Ltmp9:
; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; CHECK-NEXT: .cfi_restore ra
; CHECK-NEXT: addi sp, sp, 32
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
; Check that arguments with attributes are lowered correctly.
20 changes: 20 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv-cfi-info.ll
@@ -53,7 +53,16 @@ define riscv_vector_cc <vscale x 1 x i32> @test_vector_callee_cfi(<vscale x 1 x
; OMIT-FP-NEXT: slli a1, a0, 3
; OMIT-FP-NEXT: sub a0, a1, a0
; OMIT-FP-NEXT: add sp, sp, a0
; OMIT-FP-NEXT: .cfi_def_cfa sp, 16
; OMIT-FP-NEXT: .cfi_restore v1
; OMIT-FP-NEXT: .cfi_restore v2
; OMIT-FP-NEXT: .cfi_restore v3
; OMIT-FP-NEXT: .cfi_restore v4
; OMIT-FP-NEXT: .cfi_restore v5
; OMIT-FP-NEXT: .cfi_restore v6
; OMIT-FP-NEXT: .cfi_restore v7
; OMIT-FP-NEXT: addi sp, sp, 16
; OMIT-FP-NEXT: .cfi_def_cfa_offset 0
; OMIT-FP-NEXT: ret
;
; NO-OMIT-FP-LABEL: test_vector_callee_cfi:
@@ -111,10 +120,21 @@ define riscv_vector_cc <vscale x 1 x i32> @test_vector_callee_cfi(<vscale x 1 x
; NO-OMIT-FP-NEXT: sub a0, s0, a0
; NO-OMIT-FP-NEXT: addi a0, a0, -32
; NO-OMIT-FP-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
; NO-OMIT-FP-NEXT: .cfi_restore v1
; NO-OMIT-FP-NEXT: .cfi_restore v2
; NO-OMIT-FP-NEXT: .cfi_restore v3
; NO-OMIT-FP-NEXT: .cfi_restore v4
; NO-OMIT-FP-NEXT: .cfi_restore v5
; NO-OMIT-FP-NEXT: .cfi_restore v6
; NO-OMIT-FP-NEXT: .cfi_restore v7
; NO-OMIT-FP-NEXT: addi sp, s0, -32
; NO-OMIT-FP-NEXT: .cfi_def_cfa sp, 32
; NO-OMIT-FP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; NO-OMIT-FP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; NO-OMIT-FP-NEXT: .cfi_restore ra
; NO-OMIT-FP-NEXT: .cfi_restore s0
; NO-OMIT-FP-NEXT: addi sp, sp, 32
; NO-OMIT-FP-NEXT: .cfi_def_cfa_offset 0
; NO-OMIT-FP-NEXT: ret
entry:
call void asm sideeffect "",
2 changes: 2 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
@@ -603,7 +603,9 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x i64> %v
3 changes: 3 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
@@ -12,6 +12,7 @@ define <vscale x 1 x i64> @access_fixed_object(ptr %val) {
; RV64IV-NEXT: ld a1, 520(sp)
; RV64IV-NEXT: sd a1, 0(a0)
; RV64IV-NEXT: addi sp, sp, 528
; RV64IV-NEXT: .cfi_def_cfa_offset 0
; RV64IV-NEXT: ret
%local = alloca i64
%array = alloca [64 x i64]
@@ -44,7 +45,9 @@ define <vscale x 1 x i64> @access_fixed_and_vector_objects(ptr %val) {
; RV64IV-NEXT: vadd.vv v8, v8, v9
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: add sp, sp, a0
; RV64IV-NEXT: .cfi_def_cfa sp, 528
; RV64IV-NEXT: addi sp, sp, 528
; RV64IV-NEXT: .cfi_def_cfa_offset 0
; RV64IV-NEXT: ret
%local = alloca i64
%vector = alloca <vscale x 1 x i64>
4 changes: 4 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
@@ -47,9 +47,13 @@ body: |
; CHECK-NEXT: $x10 = ADDI killed $x10, -224
; CHECK-NEXT: VS1R_V killed renamable $v8, killed renamable $x10
; CHECK-NEXT: $x2 = frame-destroy ADDI $x8, -2032
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $x2, 2032
; CHECK-NEXT: $x1 = LD $x2, 2024 :: (load (s64) from %stack.3)
; CHECK-NEXT: $x8 = LD $x2, 2016 :: (load (s64) from %stack.4)
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x1
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $x8
; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 2032
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
; CHECK-NEXT: PseudoRET
%1:gprnox0 = COPY $x11
%0:gpr = COPY $x10
@@ -32,7 +32,9 @@ define void @test(ptr %addr) {
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%ret = alloca %my_type, align 8
@@ -30,7 +30,9 @@ define <vscale x 1 x double> @test(ptr %addr, i64 %vl) {
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%ret = alloca %struct.test, align 8
6 changes: 6 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/alloca-load-store-vector-tuple.ll
@@ -34,7 +34,9 @@ define target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @load_store_m1x5(targe
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%tuple.addr = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 5), align 1
@@ -67,7 +69,9 @@ define target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @load_store_m2x2(targ
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%tuple.addr = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 2), align 1
@@ -100,7 +104,9 @@ define target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @load_store_m4x2(targ
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%tuple.addr = alloca target("riscv.vector.tuple", <vscale x 32 x i8>, 2), align 1
5 changes: 5 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/binop-splats.ll
@@ -406,6 +406,7 @@ define <vscale x 1 x i64> @nxv1i64(i64 %x, i64 %y) {
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v8, (a0), zero
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: nxv1i64:
@@ -437,6 +438,7 @@ define <vscale x 2 x i64> @nxv2i64(i64 %x, i64 %y) {
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v8, (a0), zero
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: nxv2i64:
@@ -468,6 +470,7 @@ define <vscale x 4 x i64> @nxv4i64(i64 %x, i64 %y) {
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v8, (a0), zero
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: nxv4i64:
@@ -499,6 +502,7 @@ define <vscale x 8 x i64> @nxv8i64(i64 %x, i64 %y) {
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v8, (a0), zero
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: nxv8i64:
@@ -591,6 +595,7 @@ define <vscale x 1 x i64> @uaddsatnxv1i64(i64 %x, i64 %y) {
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: uaddsatnxv1i64:
5 changes: 5 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
@@ -769,6 +769,7 @@ define <vscale x 1 x i64> @bitreverse_nxv1i64(<vscale x 1 x i64> %va) {
; RV32-NEXT: vadd.vv v8, v8, v8
; RV32-NEXT: vor.vv v8, v9, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: bitreverse_nxv1i64:
@@ -909,6 +910,7 @@ define <vscale x 2 x i64> @bitreverse_nxv2i64(<vscale x 2 x i64> %va) {
; RV32-NEXT: vadd.vv v8, v8, v8
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: bitreverse_nxv2i64:
@@ -1049,6 +1051,7 @@ define <vscale x 4 x i64> @bitreverse_nxv4i64(<vscale x 4 x i64> %va) {
; RV32-NEXT: vadd.vv v8, v8, v8
; RV32-NEXT: vor.vv v8, v12, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: bitreverse_nxv4i64:
@@ -1202,7 +1205,9 @@ define <vscale x 8 x i64> @bitreverse_nxv8i64(<vscale x 8 x i64> %va) {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: bitreverse_nxv8i64:
20 changes: 20 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
@@ -1502,6 +1502,7 @@ define <vscale x 1 x i64> @vp_bitreverse_nxv1i64(<vscale x 1 x i64> %va, <vscale
; RV32-NEXT: vsll.vi v8, v8, 1, v0.t
; RV32-NEXT: vor.vv v8, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bitreverse_nxv1i64:
@@ -1643,6 +1644,7 @@ define <vscale x 1 x i64> @vp_bitreverse_nxv1i64_unmasked(<vscale x 1 x i64> %va
; RV32-NEXT: vadd.vv v8, v8, v8
; RV32-NEXT: vor.vv v8, v9, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bitreverse_nxv1i64_unmasked:
@@ -1786,6 +1788,7 @@ define <vscale x 2 x i64> @vp_bitreverse_nxv2i64(<vscale x 2 x i64> %va, <vscale
; RV32-NEXT: vsll.vi v8, v8, 1, v0.t
; RV32-NEXT: vor.vv v8, v10, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bitreverse_nxv2i64:
@@ -1927,6 +1930,7 @@ define <vscale x 2 x i64> @vp_bitreverse_nxv2i64_unmasked(<vscale x 2 x i64> %va
; RV32-NEXT: vadd.vv v8, v8, v8
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bitreverse_nxv2i64_unmasked:
@@ -2070,6 +2074,7 @@ define <vscale x 4 x i64> @vp_bitreverse_nxv4i64(<vscale x 4 x i64> %va, <vscale
; RV32-NEXT: vsll.vi v8, v8, 1, v0.t
; RV32-NEXT: vor.vv v8, v12, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bitreverse_nxv4i64:
@@ -2211,6 +2216,7 @@ define <vscale x 4 x i64> @vp_bitreverse_nxv4i64_unmasked(<vscale x 4 x i64> %va
; RV32-NEXT: vadd.vv v8, v8, v8
; RV32-NEXT: vor.vv v8, v12, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bitreverse_nxv4i64_unmasked:
@@ -2399,7 +2405,9 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64(<vscale x 7 x i64> %va, <vscale
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bitreverse_nxv7i64:
@@ -2476,7 +2484,9 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64(<vscale x 7 x i64> %va, <vscale
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv7i64:
@@ -2571,7 +2581,9 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64_unmasked(<vscale x 7 x i64> %va
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bitreverse_nxv7i64_unmasked:
@@ -2760,7 +2772,9 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64(<vscale x 8 x i64> %va, <vscale
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bitreverse_nxv8i64:
@@ -2837,7 +2851,9 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64(<vscale x 8 x i64> %va, <vscale
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv8i64:
@@ -2932,7 +2948,9 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64_unmasked(<vscale x 8 x i64> %va
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bitreverse_nxv8i64_unmasked:
@@ -3092,7 +3110,9 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv64i16:
5 changes: 5 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
@@ -291,6 +291,7 @@ define <vscale x 1 x i64> @bswap_nxv1i64(<vscale x 1 x i64> %va) {
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: vor.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: bswap_nxv1i64:
@@ -374,6 +375,7 @@ define <vscale x 2 x i64> @bswap_nxv2i64(<vscale x 2 x i64> %va) {
; RV32-NEXT: vor.vv v8, v12, v8
; RV32-NEXT: vor.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: bswap_nxv2i64:
@@ -457,6 +459,7 @@ define <vscale x 4 x i64> @bswap_nxv4i64(<vscale x 4 x i64> %va) {
; RV32-NEXT: vor.vv v8, v16, v8
; RV32-NEXT: vor.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: bswap_nxv4i64:
@@ -553,7 +556,9 @@ define <vscale x 8 x i64> @bswap_nxv8i64(<vscale x 8 x i64> %va) {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: bswap_nxv8i64:
21 changes: 21 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
@@ -548,6 +548,7 @@ define <vscale x 1 x i64> @vp_bswap_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
; RV32-NEXT: vor.vv v8, v8, v11, v0.t
; RV32-NEXT: vor.vv v8, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bswap_nxv1i64:
@@ -632,6 +633,7 @@ define <vscale x 1 x i64> @vp_bswap_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
; RV32-NEXT: vor.vv v8, v8, v11
; RV32-NEXT: vor.vv v8, v9, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bswap_nxv1i64_unmasked:
@@ -718,6 +720,7 @@ define <vscale x 2 x i64> @vp_bswap_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2
; RV32-NEXT: vor.vv v8, v8, v14, v0.t
; RV32-NEXT: vor.vv v8, v10, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bswap_nxv2i64:
@@ -802,6 +805,7 @@ define <vscale x 2 x i64> @vp_bswap_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
; RV32-NEXT: vor.vv v8, v8, v14
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bswap_nxv2i64_unmasked:
@@ -888,6 +892,7 @@ define <vscale x 4 x i64> @vp_bswap_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4
; RV32-NEXT: vor.vv v8, v8, v20, v0.t
; RV32-NEXT: vor.vv v8, v16, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bswap_nxv4i64:
@@ -972,6 +977,7 @@ define <vscale x 4 x i64> @vp_bswap_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
; RV32-NEXT: vor.vv v8, v8, v20
; RV32-NEXT: vor.vv v8, v12, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bswap_nxv4i64_unmasked:
@@ -1103,7 +1109,9 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bswap_nxv7i64:
@@ -1153,7 +1161,9 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
; CHECK-ZVKB-LABEL: vp_bswap_nxv7i64:
@@ -1217,7 +1227,9 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bswap_nxv7i64_unmasked:
@@ -1349,7 +1361,9 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bswap_nxv8i64:
@@ -1399,7 +1413,9 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
; CHECK-ZVKB-LABEL: vp_bswap_nxv8i64:
@@ -1463,7 +1479,9 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bswap_nxv8i64_unmasked:
@@ -1560,7 +1578,9 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vp_bswap_nxv64i16:
@@ -1676,6 +1696,7 @@ define <vscale x 1 x i48> @vp_bswap_nxv1i48(<vscale x 1 x i48> %va, <vscale x 1
; RV32-NEXT: vor.vv v8, v9, v8, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vp_bswap_nxv1i48: