llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll: 24 changes (12 additions, 12 deletions)
@@ -357,18 +357,18 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
; RV32-NEXT: addi a0, a0, -1463
; RV32-NEXT: andi a0, a0, 2047
; RV32-NEXT: sltiu a0, a0, 293
-; RV32-NEXT: addi a1, s3, -1
+; RV32-NEXT: addi s3, s3, -1
; RV32-NEXT: addi a0, a0, -1
-; RV32-NEXT: addi a2, s1, -1
-; RV32-NEXT: slli a3, a2, 21
-; RV32-NEXT: srli a3, a3, 31
-; RV32-NEXT: sb a3, 4(s0)
-; RV32-NEXT: andi a1, a1, 2047
+; RV32-NEXT: addi s1, s1, -1
+; RV32-NEXT: slli a1, s1, 21
+; RV32-NEXT: srli a1, a1, 31
+; RV32-NEXT: sb a1, 4(s0)
+; RV32-NEXT: andi a1, s3, 2047
; RV32-NEXT: andi a0, a0, 2047
; RV32-NEXT: slli a0, a0, 11
; RV32-NEXT: or a0, a1, a0
-; RV32-NEXT: slli a1, a2, 22
-; RV32-NEXT: or a0, a0, a1
+; RV32-NEXT: slli s1, s1, 22
+; RV32-NEXT: or a0, a0, s1
; RV32-NEXT: sw a0, 0(s0)
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
@@ -414,11 +414,11 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
; RV64-NEXT: addiw a0, a0, -1638
; RV64-NEXT: andi a0, a0, 2047
; RV64-NEXT: sltiu a0, a0, 2
-; RV64-NEXT: addiw a1, s3, -1
+; RV64-NEXT: addiw s3, s3, -1
; RV64-NEXT: addi a0, a0, -1
-; RV64-NEXT: addiw a2, s2, -1
-; RV64-NEXT: andi a1, a1, 2047
-; RV64-NEXT: andi a2, a2, 2047
+; RV64-NEXT: addiw s2, s2, -1
+; RV64-NEXT: andi a1, s3, 2047
+; RV64-NEXT: andi a2, s2, 2047
; RV64-NEXT: slli a2, a2, 11
; RV64-NEXT: or a1, a1, a2
; RV64-NEXT: slli a0, a0, 22
llvm/test/CodeGen/RISCV/vararg.ll: 32 changes (16 additions, 16 deletions)
@@ -459,8 +459,8 @@ define void @va1_caller() nounwind {
; LP64-LP64F-LP64D-FPELIM: # %bb.0:
; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16
; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 1023
-; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a0, 52
+; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1023
+; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 52
; LP64-LP64F-LP64D-FPELIM-NEXT: li a2, 2
; LP64-LP64F-LP64D-FPELIM-NEXT: call va1@plt
; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -473,8 +473,8 @@ define void @va1_caller() nounwind {
; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 1023
-; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a0, 52
+; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 1023
+; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a1, 52
; LP64-LP64F-LP64D-WITHFP-NEXT: li a2, 2
; LP64-LP64F-LP64D-WITHFP-NEXT: call va1@plt
; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -775,8 +775,8 @@ define void @va2_caller() nounwind {
; LP64-LP64F-LP64D-FPELIM: # %bb.0:
; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16
; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 1023
-; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a0, 52
+; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1023
+; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 52
; LP64-LP64F-LP64D-FPELIM-NEXT: call va2@plt
; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 16
@@ -788,8 +788,8 @@ define void @va2_caller() nounwind {
; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 1023
-; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a0, 52
+; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 1023
+; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a1, 52
; LP64-LP64F-LP64D-WITHFP-NEXT: call va2@plt
; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
@@ -1110,8 +1110,8 @@ define void @va3_caller() nounwind {
; LP64-LP64F-LP64D-FPELIM: # %bb.0:
; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16
; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 1
-; LP64-LP64F-LP64D-FPELIM-NEXT: slli a2, a0, 62
+; LP64-LP64F-LP64D-FPELIM-NEXT: li a2, 1
+; LP64-LP64F-LP64D-FPELIM-NEXT: slli a2, a2, 62
; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 2
; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1111
; LP64-LP64F-LP64D-FPELIM-NEXT: call va3@plt
@@ -1125,8 +1125,8 @@ define void @va3_caller() nounwind {
; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 1
-; LP64-LP64F-LP64D-WITHFP-NEXT: slli a2, a0, 62
+; LP64-LP64F-LP64D-WITHFP-NEXT: li a2, 1
+; LP64-LP64F-LP64D-WITHFP-NEXT: slli a2, a2, 62
; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 2
; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 1111
; LP64-LP64F-LP64D-WITHFP-NEXT: call va3@plt
@@ -1527,8 +1527,8 @@ define void @va5_aligned_stack_caller() nounwind {
; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, %hi(.LCPI11_2)
; LP64-LP64F-LP64D-FPELIM-NEXT: ld a3, %lo(.LCPI11_2)(a0)
; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, 2384
-; LP64-LP64F-LP64D-FPELIM-NEXT: addiw a0, a0, 761
-; LP64-LP64F-LP64D-FPELIM-NEXT: slli a6, a0, 11
+; LP64-LP64F-LP64D-FPELIM-NEXT: addiw a6, a0, 761
+; LP64-LP64F-LP64D-FPELIM-NEXT: slli a6, a6, 11
; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 1
; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 11
; LP64-LP64F-LP64D-FPELIM-NEXT: li a4, 12
@@ -1559,8 +1559,8 @@ define void @va5_aligned_stack_caller() nounwind {
; LP64-LP64F-LP64D-WITHFP-NEXT: lui a0, %hi(.LCPI11_2)
; LP64-LP64F-LP64D-WITHFP-NEXT: ld a3, %lo(.LCPI11_2)(a0)
; LP64-LP64F-LP64D-WITHFP-NEXT: lui a0, 2384
-; LP64-LP64F-LP64D-WITHFP-NEXT: addiw a0, a0, 761
-; LP64-LP64F-LP64D-WITHFP-NEXT: slli a6, a0, 11
+; LP64-LP64F-LP64D-WITHFP-NEXT: addiw a6, a0, 761
+; LP64-LP64F-LP64D-WITHFP-NEXT: slli a6, a6, 11
; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 1
; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 11
; LP64-LP64F-LP64D-WITHFP-NEXT: li a4, 12
llvm/test/CodeGen/RISCV/xaluo.ll: 24 changes (12 additions, 12 deletions)
@@ -4018,9 +4018,9 @@ define zeroext i1 @uaddo.i64.constant_2048(i64 %v1, i64* %res) {
; RV32-LABEL: uaddo.i64.constant_2048:
; RV32: # %bb.0: # %entry
; RV32-NEXT: mv a3, a0
-; RV32-NEXT: addi a0, a0, 2047
-; RV32-NEXT: addi a4, a0, 1
-; RV32-NEXT: sltu a0, a4, a3
+; RV32-NEXT: addi a4, a0, 2047
+; RV32-NEXT: addi a4, a4, 1
+; RV32-NEXT: sltu a0, a4, a0
; RV32-NEXT: add a5, a1, a0
; RV32-NEXT: bgeu a4, a3, .LBB67_2
; RV32-NEXT: # %bb.1: # %entry
@@ -4041,9 +4041,9 @@ define zeroext i1 @uaddo.i64.constant_2048(i64 %v1, i64* %res) {
; RV32ZBA-LABEL: uaddo.i64.constant_2048:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: mv a3, a0
-; RV32ZBA-NEXT: addi a0, a0, 2047
-; RV32ZBA-NEXT: addi a4, a0, 1
-; RV32ZBA-NEXT: sltu a0, a4, a3
+; RV32ZBA-NEXT: addi a4, a0, 2047
+; RV32ZBA-NEXT: addi a4, a4, 1
+; RV32ZBA-NEXT: sltu a0, a4, a0
; RV32ZBA-NEXT: add a5, a1, a0
; RV32ZBA-NEXT: bgeu a4, a3, .LBB67_2
; RV32ZBA-NEXT: # %bb.1: # %entry
@@ -4072,9 +4072,9 @@ define zeroext i1 @uaddo.i64.constant_2049(i64 %v1, i64* %res) {
; RV32-LABEL: uaddo.i64.constant_2049:
; RV32: # %bb.0: # %entry
; RV32-NEXT: mv a3, a0
-; RV32-NEXT: addi a0, a0, 2047
-; RV32-NEXT: addi a4, a0, 2
-; RV32-NEXT: sltu a0, a4, a3
+; RV32-NEXT: addi a4, a0, 2047
+; RV32-NEXT: addi a4, a4, 2
+; RV32-NEXT: sltu a0, a4, a0
; RV32-NEXT: add a5, a1, a0
; RV32-NEXT: bgeu a4, a3, .LBB68_2
; RV32-NEXT: # %bb.1: # %entry
@@ -4095,9 +4095,9 @@ define zeroext i1 @uaddo.i64.constant_2049(i64 %v1, i64* %res) {
; RV32ZBA-LABEL: uaddo.i64.constant_2049:
; RV32ZBA: # %bb.0: # %entry
; RV32ZBA-NEXT: mv a3, a0
-; RV32ZBA-NEXT: addi a0, a0, 2047
-; RV32ZBA-NEXT: addi a4, a0, 2
-; RV32ZBA-NEXT: sltu a0, a4, a3
+; RV32ZBA-NEXT: addi a4, a0, 2047
+; RV32ZBA-NEXT: addi a4, a4, 2
+; RV32ZBA-NEXT: sltu a0, a4, a0
; RV32ZBA-NEXT: add a5, a1, a0
; RV32ZBA-NEXT: bgeu a4, a3, .LBB68_2
; RV32ZBA-NEXT: # %bb.1: # %entry