137 changes: 75 additions & 62 deletions llvm/test/CodeGen/RISCV/double-convert.ll
@@ -749,40 +749,47 @@ define i64 @fcvt_l_d(double %a) nounwind {
define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV32IFD-LABEL: fcvt_l_d_sat:
; RV32IFD: # %bb.0: # %start
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: addi sp, sp, -32
; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: lui a0, %hi(.LCPI12_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
; RV32IFD-NEXT: lui a0, %hi(.LCPI12_1)
; RV32IFD-NEXT: fld fa4, %lo(.LCPI12_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: flt.d s0, fa5, fa0
; RV32IFD-NEXT: neg s1, s0
; RV32IFD-NEXT: fle.d s2, fa4, fa0
; RV32IFD-NEXT: neg s3, s2
; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: and a0, s3, a0
; RV32IFD-NEXT: or a0, s1, a0
; RV32IFD-NEXT: feq.d a2, fs0, fs0
; RV32IFD-NEXT: neg a2, a2
; RV32IFD-NEXT: lui a4, 524288
; RV32IFD-NEXT: lui a2, 524288
; RV32IFD-NEXT: beqz s0, .LBB12_2
; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
; RV32IFD-NEXT: bne s2, a5, .LBB12_2
; RV32IFD-NEXT: # %bb.1: # %start
; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: mv a3, a1
; RV32IFD-NEXT: .LBB12_2: # %start
; RV32IFD-NEXT: lui a1, %hi(.LCPI12_1)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI12_1)(a1)
; RV32IFD-NEXT: flt.d a3, fa5, fs0
; RV32IFD-NEXT: beqz a3, .LBB12_4
; RV32IFD-NEXT: and a0, a2, a0
; RV32IFD-NEXT: beqz s0, .LBB12_4
; RV32IFD-NEXT: # %bb.3:
; RV32IFD-NEXT: addi a2, a4, -1
; RV32IFD-NEXT: addi a3, a4, -1
; RV32IFD-NEXT: .LBB12_4: # %start
; RV32IFD-NEXT: feq.d a1, fs0, fs0
; RV32IFD-NEXT: neg a4, a1
; RV32IFD-NEXT: and a1, a4, a2
; RV32IFD-NEXT: neg a2, a3
; RV32IFD-NEXT: neg a3, s0
; RV32IFD-NEXT: and a0, a3, a0
; RV32IFD-NEXT: or a0, a2, a0
; RV32IFD-NEXT: and a0, a4, a0
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: and a1, a2, a3
; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: addi sp, sp, 32
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_l_d_sat:
@@ -800,40 +800,45 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI12_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI12_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI12_0)(a2)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
; RV32IZFINXZDINX-NEXT: neg s3, s2
; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI12_1)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI12_1+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI12_1)(a2)
; RV32IZFINXZDINX-NEXT: and a0, s3, a0
; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
; RV32IZFINXZDINX-NEXT: neg a2, a3
; RV32IZFINXZDINX-NEXT: or a0, a2, a0
; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
; RV32IZFINXZDINX-NEXT: neg a2, a2
; RV32IZFINXZDINX-NEXT: lui a5, 524288
; RV32IZFINXZDINX-NEXT: lui a3, 524288
; RV32IZFINXZDINX-NEXT: beqz a2, .LBB12_2
; RV32IZFINXZDINX-NEXT: li a6, 1
; RV32IZFINXZDINX-NEXT: lui a4, 524288
; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB12_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %start
; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: mv a4, a1
; RV32IZFINXZDINX-NEXT: .LBB12_2: # %start
; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI12_1)
; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI12_1)(a1)
; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI12_1+4)(a1)
; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
; RV32IZFINXZDINX-NEXT: beqz a4, .LBB12_4
; RV32IZFINXZDINX-NEXT: and a0, a2, a0
; RV32IZFINXZDINX-NEXT: beqz a3, .LBB12_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB12_4: # %start
; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
; RV32IZFINXZDINX-NEXT: neg a5, a1
; RV32IZFINXZDINX-NEXT: and a1, a5, a3
; RV32IZFINXZDINX-NEXT: neg a2, a2
; RV32IZFINXZDINX-NEXT: and a0, a2, a0
; RV32IZFINXZDINX-NEXT: neg a2, a4
; RV32IZFINXZDINX-NEXT: or a0, a2, a0
; RV32IZFINXZDINX-NEXT: and a0, a5, a0
; RV32IZFINXZDINX-NEXT: and a1, a2, a4
; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
; RV32IZFINXZDINX-NEXT: ret
;
@@ -1013,23 +1025,23 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: fmv.d fs0, fa0
; RV32IFD-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: lui a0, %hi(.LCPI14_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
; RV32IFD-NEXT: flt.d a0, fa5, fa0
; RV32IFD-NEXT: neg s0, a0
; RV32IFD-NEXT: fcvt.d.w fa5, zero
; RV32IFD-NEXT: fle.d a0, fa5, fa0
; RV32IFD-NEXT: neg s0, a0
; RV32IFD-NEXT: xori a0, a0, 1
; RV32IFD-NEXT: addi s1, a0, -1
; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: lui a2, %hi(.LCPI14_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI14_0)(a2)
; RV32IFD-NEXT: and a0, s0, a0
; RV32IFD-NEXT: flt.d a2, fa5, fs0
; RV32IFD-NEXT: neg a2, a2
; RV32IFD-NEXT: or a0, a2, a0
; RV32IFD-NEXT: and a1, s0, a1
; RV32IFD-NEXT: or a1, a2, a1
; RV32IFD-NEXT: and a0, s1, a0
; RV32IFD-NEXT: or a0, s0, a0
; RV32IFD-NEXT: and a1, s1, a1
; RV32IFD-NEXT: or a1, s0, a1
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
@@ -1054,11 +1066,12 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI14_0)
; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI14_0+4)(a4)
; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI14_0)(a4)
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg a2, a2
; RV32IZFINXZDINX-NEXT: lui a3, %hi(.LCPI14_0)
; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI14_0)(a3)
; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI14_0+4)(a3)
; RV32IZFINXZDINX-NEXT: xori a2, a2, 1
; RV32IZFINXZDINX-NEXT: addi a2, a2, -1
; RV32IZFINXZDINX-NEXT: and a0, a2, a0
; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0
; RV32IZFINXZDINX-NEXT: neg a3, a3
804 changes: 432 additions & 372 deletions llvm/test/CodeGen/RISCV/double-round-conv-sat.ll

Large diffs are not rendered by default.

236 changes: 124 additions & 112 deletions llvm/test/CodeGen/RISCV/float-convert.ll
@@ -275,24 +275,26 @@ define i32 @fcvt_wu_s_sat(float %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a1, 325632
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg s1, a0
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __gesf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: addi s1, a0, -1
; RV32I-NEXT: addi s2, a0, -1
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: and s1, s1, a0
; RV32I-NEXT: lui a1, 325632
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: or a0, a0, s1
; RV32I-NEXT: and a0, s2, a0
; RV32I-NEXT: or a0, s1, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
@@ -613,40 +615,47 @@ define i64 @fcvt_l_s(float %a) nounwind {
define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32IF-LABEL: fcvt_l_s_sat:
; RV32IF: # %bb.0: # %start
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
; RV32IF-NEXT: addi sp, sp, -32
; RV32IF-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IF-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IF-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32IF-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32IF-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT: lui a0, %hi(.LCPI12_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI12_0)(a0)
; RV32IF-NEXT: fmv.s fs0, fa0
; RV32IF-NEXT: flt.s s0, fa5, fa0
; RV32IF-NEXT: neg s1, s0
; RV32IF-NEXT: lui a0, 913408
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fle.s s0, fa5, fa0
; RV32IF-NEXT: fle.s s2, fa5, fa0
; RV32IF-NEXT: neg s3, s2
; RV32IF-NEXT: call __fixsfdi
; RV32IF-NEXT: and a0, s3, a0
; RV32IF-NEXT: or a0, s1, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: lui a2, 524288
; RV32IF-NEXT: beqz s0, .LBB12_2
; RV32IF-NEXT: li a5, 1
; RV32IF-NEXT: lui a3, 524288
; RV32IF-NEXT: bne s2, a5, .LBB12_2
; RV32IF-NEXT: # %bb.1: # %start
; RV32IF-NEXT: mv a2, a1
; RV32IF-NEXT: mv a3, a1
; RV32IF-NEXT: .LBB12_2: # %start
; RV32IF-NEXT: lui a1, %hi(.LCPI12_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI12_0)(a1)
; RV32IF-NEXT: flt.s a3, fa5, fs0
; RV32IF-NEXT: beqz a3, .LBB12_4
; RV32IF-NEXT: and a0, a2, a0
; RV32IF-NEXT: beqz s0, .LBB12_4
; RV32IF-NEXT: # %bb.3:
; RV32IF-NEXT: addi a2, a4, -1
; RV32IF-NEXT: addi a3, a4, -1
; RV32IF-NEXT: .LBB12_4: # %start
; RV32IF-NEXT: feq.s a1, fs0, fs0
; RV32IF-NEXT: neg a4, a1
; RV32IF-NEXT: and a1, a4, a2
; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: neg a3, s0
; RV32IF-NEXT: and a0, a3, a0
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: and a0, a4, a0
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: and a1, a2, a3
; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 32
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: fcvt_l_s_sat:
@@ -664,35 +673,38 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
; RV32IZFINX-NEXT: neg s2, s1
; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI12_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI12_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
; RV32IZFINX-NEXT: flt.s a3, a2, s0
; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
; RV32IZFINX-NEXT: li a6, 1
; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: lui a2, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB12_2
; RV32IZFINX-NEXT: bne s1, a6, .LBB12_2
; RV32IZFINX-NEXT: # %bb.1: # %start
; RV32IZFINX-NEXT: mv a2, a1
; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB12_2: # %start
; RV32IZFINX-NEXT: lui a1, %hi(.LCPI12_0)
; RV32IZFINX-NEXT: lw a1, %lo(.LCPI12_0)(a1)
; RV32IZFINX-NEXT: flt.s a3, a1, s0
; RV32IZFINX-NEXT: and a0, a2, a0
; RV32IZFINX-NEXT: beqz a3, .LBB12_4
; RV32IZFINX-NEXT: # %bb.3:
; RV32IZFINX-NEXT: addi a2, a4, -1
; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB12_4: # %start
; RV32IZFINX-NEXT: feq.s a1, s0, s0
; RV32IZFINX-NEXT: neg a4, a1
; RV32IZFINX-NEXT: and a1, a4, a2
; RV32IZFINX-NEXT: neg a2, s1
; RV32IZFINX-NEXT: and a0, a2, a0
; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: and a0, a4, a0
; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
;
@@ -863,23 +875,23 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
; RV32IF-NEXT: fmv.s fs0, fa0
; RV32IF-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IF-NEXT: lui a0, %hi(.LCPI14_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI14_0)(a0)
; RV32IF-NEXT: flt.s a0, fa5, fa0
; RV32IF-NEXT: neg s0, a0
; RV32IF-NEXT: fmv.w.x fa5, zero
; RV32IF-NEXT: fle.s a0, fa5, fa0
; RV32IF-NEXT: neg s0, a0
; RV32IF-NEXT: xori a0, a0, 1
; RV32IF-NEXT: addi s1, a0, -1
; RV32IF-NEXT: call __fixunssfdi
; RV32IF-NEXT: lui a2, %hi(.LCPI14_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI14_0)(a2)
; RV32IF-NEXT: and a0, s0, a0
; RV32IF-NEXT: flt.s a2, fa5, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: and a1, s0, a1
; RV32IF-NEXT: or a1, a2, a1
; RV32IF-NEXT: and a0, s1, a0
; RV32IF-NEXT: or a0, s0, a0
; RV32IF-NEXT: and a1, s1, a1
; RV32IF-NEXT: or a1, s0, a1
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
@@ -898,19 +910,18 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: fle.s a0, zero, a0
; RV32IZFINX-NEXT: neg s1, a0
; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: lui a1, %hi(.LCPI14_0)
; RV32IZFINX-NEXT: lw a1, %lo(.LCPI14_0)(a1)
; RV32IZFINX-NEXT: flt.s a1, a1, a0
; RV32IZFINX-NEXT: neg s0, a1
; RV32IZFINX-NEXT: fle.s a1, zero, a0
; RV32IZFINX-NEXT: xori a1, a1, 1
; RV32IZFINX-NEXT: addi s1, a1, -1
; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI14_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI14_0)(a2)
; RV32IZFINX-NEXT: and a0, s1, a0
; RV32IZFINX-NEXT: flt.s a2, a2, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: or a0, s0, a0
; RV32IZFINX-NEXT: and a1, s1, a1
; RV32IZFINX-NEXT: or a1, a2, a1
; RV32IZFINX-NEXT: or a1, s0, a1
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -928,36 +939,33 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
;
; RV32I-LABEL: fcvt_lu_s_sat:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a1, 391168
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg s1, a0
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __gesf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: addi s2, a0, -1
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __fixunssfdi
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: and s3, s2, a0
; RV32I-NEXT: lui a1, 391168
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg a1, a0
; RV32I-NEXT: or a0, a1, s3
; RV32I-NEXT: and a2, s2, s1
; RV32I-NEXT: or a1, a1, a2
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: and a0, s2, a0
; RV32I-NEXT: or a0, s1, a0
; RV32I-NEXT: and a1, s2, a1
; RV32I-NEXT: or a1, s1, a1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_lu_s_sat:
@@ -966,24 +974,26 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lui a1, 391168
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: neg s1, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __gesf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: addi s1, a0, -1
; RV64I-NEXT: addi s2, a0, -1
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: and s1, s1, a0
; RV64I-NEXT: lui a1, 391168
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: or a0, a0, s1
; RV64I-NEXT: and a0, s2, a0
; RV64I-NEXT: or a0, s1, a0
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
start:
@@ -2089,24 +2099,26 @@ define zeroext i32 @fcvt_wu_s_sat_zext(float %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a1, 325632
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg s1, a0
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __gesf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: addi s1, a0, -1
; RV32I-NEXT: addi s2, a0, -1
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: and s1, s1, a0
; RV32I-NEXT: lui a1, 325632
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: or a0, a0, s1
; RV32I-NEXT: and a0, s2, a0
; RV32I-NEXT: or a0, s1, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
636 changes: 324 additions & 312 deletions llvm/test/CodeGen/RISCV/float-round-conv-sat.ll

Large diffs are not rendered by default.

5 changes: 3 additions & 2 deletions llvm/test/CodeGen/RISCV/forced-atomics.ll
@@ -3567,8 +3567,8 @@ define i64 @rmw64_umax_seq_cst(ptr %p) nounwind {
; RV32-NEXT: # in Loop: Header=BB51_2 Depth=1
; RV32-NEXT: neg a3, a0
; RV32-NEXT: and a3, a3, a1
; RV32-NEXT: sw a4, 0(sp)
; RV32-NEXT: sw a1, 4(sp)
; RV32-NEXT: sw a4, 0(sp)
; RV32-NEXT: mv a1, sp
; RV32-NEXT: li a4, 5
; RV32-NEXT: li a5, 5
@@ -3672,7 +3672,8 @@ define i64 @rmw64_umin_seq_cst(ptr %p) nounwind {
; RV32-NEXT: .LBB52_2: # %atomicrmw.start
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-NEXT: sltiu a0, a4, 2
; RV32-NEXT: seqz a2, a1
; RV32-NEXT: snez a2, a1
; RV32-NEXT: addi a2, a2, -1
; RV32-NEXT: and a0, a2, a0
; RV32-NEXT: mv a2, a4
; RV32-NEXT: bnez a0, .LBB52_1
350 changes: 184 additions & 166 deletions llvm/test/CodeGen/RISCV/fpclamptosat.ll

Large diffs are not rendered by default.

370 changes: 212 additions & 158 deletions llvm/test/CodeGen/RISCV/half-convert.ll

Large diffs are not rendered by default.

1,356 changes: 690 additions & 666 deletions llvm/test/CodeGen/RISCV/half-round-conv-sat.ll

Large diffs are not rendered by default.

88 changes: 44 additions & 44 deletions llvm/test/CodeGen/RISCV/iabs.ll
@@ -302,56 +302,56 @@ define i128 @abs128(i128 %x) {
; RV32I-LABEL: abs128:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a2, 12(a1)
; RV32I-NEXT: lw a3, 4(a1)
; RV32I-NEXT: lw a4, 0(a1)
; RV32I-NEXT: lw a3, 0(a1)
; RV32I-NEXT: lw a4, 4(a1)
; RV32I-NEXT: lw a1, 8(a1)
; RV32I-NEXT: bgez a2, .LBB8_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: neg a5, a1
; RV32I-NEXT: or a6, a4, a3
; RV32I-NEXT: snez a6, a6
; RV32I-NEXT: sltu a7, a5, a6
; RV32I-NEXT: snez a6, a4
; RV32I-NEXT: snez a7, a3
; RV32I-NEXT: or a6, a7, a6
; RV32I-NEXT: sltu t0, a5, a6
; RV32I-NEXT: snez a1, a1
; RV32I-NEXT: add a1, a2, a1
; RV32I-NEXT: neg a1, a1
; RV32I-NEXT: sub a2, a1, a7
; RV32I-NEXT: sub a2, a1, t0
; RV32I-NEXT: sub a1, a5, a6
; RV32I-NEXT: snez a5, a4
; RV32I-NEXT: neg a3, a3
; RV32I-NEXT: sub a3, a3, a5
; RV32I-NEXT: neg a4, a4
; RV32I-NEXT: sub a4, a4, a7
; RV32I-NEXT: neg a3, a3
; RV32I-NEXT: .LBB8_2:
; RV32I-NEXT: sw a4, 0(a0)
; RV32I-NEXT: sw a3, 0(a0)
; RV32I-NEXT: sw a4, 4(a0)
; RV32I-NEXT: sw a1, 8(a0)
; RV32I-NEXT: sw a3, 4(a0)
; RV32I-NEXT: sw a2, 12(a0)
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: abs128:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: lw a2, 12(a1)
; RV32ZBB-NEXT: lw a3, 4(a1)
; RV32ZBB-NEXT: lw a4, 0(a1)
; RV32ZBB-NEXT: lw a3, 0(a1)
; RV32ZBB-NEXT: lw a4, 4(a1)
; RV32ZBB-NEXT: lw a1, 8(a1)
; RV32ZBB-NEXT: bgez a2, .LBB8_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: neg a5, a1
; RV32ZBB-NEXT: or a6, a4, a3
; RV32ZBB-NEXT: snez a6, a6
; RV32ZBB-NEXT: sltu a7, a5, a6
; RV32ZBB-NEXT: snez a6, a4
; RV32ZBB-NEXT: snez a7, a3
; RV32ZBB-NEXT: or a6, a7, a6
; RV32ZBB-NEXT: sltu t0, a5, a6
; RV32ZBB-NEXT: snez a1, a1
; RV32ZBB-NEXT: add a1, a2, a1
; RV32ZBB-NEXT: neg a1, a1
; RV32ZBB-NEXT: sub a2, a1, a7
; RV32ZBB-NEXT: sub a2, a1, t0
; RV32ZBB-NEXT: sub a1, a5, a6
; RV32ZBB-NEXT: snez a5, a4
; RV32ZBB-NEXT: neg a3, a3
; RV32ZBB-NEXT: sub a3, a3, a5
; RV32ZBB-NEXT: neg a4, a4
; RV32ZBB-NEXT: sub a4, a4, a7
; RV32ZBB-NEXT: neg a3, a3
; RV32ZBB-NEXT: .LBB8_2:
; RV32ZBB-NEXT: sw a4, 0(a0)
; RV32ZBB-NEXT: sw a3, 0(a0)
; RV32ZBB-NEXT: sw a4, 4(a0)
; RV32ZBB-NEXT: sw a1, 8(a0)
; RV32ZBB-NEXT: sw a3, 4(a0)
; RV32ZBB-NEXT: sw a2, 12(a0)
; RV32ZBB-NEXT: ret
;
@@ -384,56 +384,56 @@ define i128 @select_abs128(i128 %x) {
; RV32I-LABEL: select_abs128:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a2, 12(a1)
; RV32I-NEXT: lw a3, 4(a1)
; RV32I-NEXT: lw a4, 0(a1)
; RV32I-NEXT: lw a3, 0(a1)
; RV32I-NEXT: lw a4, 4(a1)
; RV32I-NEXT: lw a1, 8(a1)
; RV32I-NEXT: bgez a2, .LBB9_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: neg a5, a1
; RV32I-NEXT: or a6, a4, a3
; RV32I-NEXT: snez a6, a6
; RV32I-NEXT: sltu a7, a5, a6
; RV32I-NEXT: snez a6, a4
; RV32I-NEXT: snez a7, a3
; RV32I-NEXT: or a6, a7, a6
; RV32I-NEXT: sltu t0, a5, a6
; RV32I-NEXT: snez a1, a1
; RV32I-NEXT: add a1, a2, a1
; RV32I-NEXT: neg a1, a1
; RV32I-NEXT: sub a2, a1, a7
; RV32I-NEXT: sub a2, a1, t0
; RV32I-NEXT: sub a1, a5, a6
; RV32I-NEXT: snez a5, a4
; RV32I-NEXT: neg a3, a3
; RV32I-NEXT: sub a3, a3, a5
; RV32I-NEXT: neg a4, a4
; RV32I-NEXT: sub a4, a4, a7
; RV32I-NEXT: neg a3, a3
; RV32I-NEXT: .LBB9_2:
; RV32I-NEXT: sw a4, 0(a0)
; RV32I-NEXT: sw a3, 0(a0)
; RV32I-NEXT: sw a4, 4(a0)
; RV32I-NEXT: sw a1, 8(a0)
; RV32I-NEXT: sw a3, 4(a0)
; RV32I-NEXT: sw a2, 12(a0)
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: select_abs128:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: lw a2, 12(a1)
; RV32ZBB-NEXT: lw a3, 4(a1)
; RV32ZBB-NEXT: lw a4, 0(a1)
; RV32ZBB-NEXT: lw a3, 0(a1)
; RV32ZBB-NEXT: lw a4, 4(a1)
; RV32ZBB-NEXT: lw a1, 8(a1)
; RV32ZBB-NEXT: bgez a2, .LBB9_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: neg a5, a1
; RV32ZBB-NEXT: or a6, a4, a3
; RV32ZBB-NEXT: snez a6, a6
; RV32ZBB-NEXT: sltu a7, a5, a6
; RV32ZBB-NEXT: snez a6, a4
; RV32ZBB-NEXT: snez a7, a3
; RV32ZBB-NEXT: or a6, a7, a6
; RV32ZBB-NEXT: sltu t0, a5, a6
; RV32ZBB-NEXT: snez a1, a1
; RV32ZBB-NEXT: add a1, a2, a1
; RV32ZBB-NEXT: neg a1, a1
; RV32ZBB-NEXT: sub a2, a1, a7
; RV32ZBB-NEXT: sub a2, a1, t0
; RV32ZBB-NEXT: sub a1, a5, a6
; RV32ZBB-NEXT: snez a5, a4
; RV32ZBB-NEXT: neg a3, a3
; RV32ZBB-NEXT: sub a3, a3, a5
; RV32ZBB-NEXT: neg a4, a4
; RV32ZBB-NEXT: sub a4, a4, a7
; RV32ZBB-NEXT: neg a3, a3
; RV32ZBB-NEXT: .LBB9_2:
; RV32ZBB-NEXT: sw a4, 0(a0)
; RV32ZBB-NEXT: sw a3, 0(a0)
; RV32ZBB-NEXT: sw a4, 4(a0)
; RV32ZBB-NEXT: sw a1, 8(a0)
; RV32ZBB-NEXT: sw a3, 4(a0)
; RV32ZBB-NEXT: sw a2, 12(a0)
; RV32ZBB-NEXT: ret
;
22 changes: 22 additions & 0 deletions llvm/test/CodeGen/RISCV/pr84200.ll
@@ -0,0 +1,22 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc < %s -mtriple=riscv64 | FileCheck %s

; The sub nuw produces poison if the input is not 0 or 1. We must insert a
; freeze before converting the sub to AND so that we don't propagate poison.
define i64 @foo(i64 %1) {
; CHECK-LABEL: foo:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li a1, 1
; CHECK-NEXT: sub a1, a1, a0
; CHECK-NEXT: sltiu a0, a0, 2
; CHECK-NEXT: xori a1, a1, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: ret
entry:
%.urem.i = sub nuw i64 1, %1
%.cmp.i = icmp ugt i64 %1, 1
%2 = xor i64 %.urem.i, 1
%3 = select i1 %.cmp.i, i64 0, i64 %2
ret i64 %3
}
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
@@ -298,14 +298,14 @@ define i32 @not_shl_one_i32(i32 %x) {
define i64 @not_shl_one_i64(i64 %x) {
; CHECK-LABEL: not_shl_one_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 1
; CHECK-NEXT: sll a1, a1, a0
; CHECK-NEXT: addi a0, a0, -32
; CHECK-NEXT: slti a0, a0, 0
; CHECK-NEXT: neg a2, a0
; CHECK-NEXT: and a2, a2, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a1, a0, a1
; CHECK-NEXT: addi a1, a0, -32
; CHECK-NEXT: slti a1, a1, 0
; CHECK-NEXT: neg a2, a1
; CHECK-NEXT: li a3, 1
; CHECK-NEXT: sll a0, a3, a0
; CHECK-NEXT: and a2, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a1, a1, a0
; CHECK-NEXT: not a0, a2
; CHECK-NEXT: not a1, a1
; CHECK-NEXT: ret
40 changes: 20 additions & 20 deletions llvm/test/CodeGen/RISCV/rv32zbs.ll
@@ -48,20 +48,20 @@ define i32 @bclr_i32_no_mask(i32 %a, i32 %b) nounwind {
define i64 @bclr_i64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: bclr_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: li a3, 1
; RV32I-NEXT: sll a4, a3, a2
; RV32I-NEXT: andi a2, a2, 63
; RV32I-NEXT: addi a5, a2, -32
; RV32I-NEXT: slti a5, a5, 0
; RV32I-NEXT: neg a6, a5
; RV32I-NEXT: and a4, a6, a4
; RV32I-NEXT: sll a2, a3, a2
; RV32I-NEXT: addi a5, a5, -1
; RV32I-NEXT: andi a3, a2, 63
; RV32I-NEXT: addi a4, a3, -32
; RV32I-NEXT: slti a4, a4, 0
; RV32I-NEXT: neg a5, a4
; RV32I-NEXT: li a6, 1
; RV32I-NEXT: sll a2, a6, a2
; RV32I-NEXT: and a2, a5, a2
; RV32I-NEXT: not a3, a4
; RV32I-NEXT: sll a3, a6, a3
; RV32I-NEXT: addi a4, a4, -1
; RV32I-NEXT: and a3, a4, a3
; RV32I-NEXT: not a2, a2
; RV32I-NEXT: and a0, a3, a0
; RV32I-NEXT: and a1, a2, a1
; RV32I-NEXT: not a3, a3
; RV32I-NEXT: and a0, a2, a0
; RV32I-NEXT: and a1, a3, a1
; RV32I-NEXT: ret
;
; RV32ZBSNOZBB-LABEL: bclr_i64:
@@ -186,14 +186,14 @@ define i64 @bset_i64(i64 %a, i64 %b) nounwind {
define signext i64 @bset_i64_zero(i64 signext %a) nounwind {
; RV32I-LABEL: bset_i64_zero:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, 1
; RV32I-NEXT: sll a1, a1, a0
; RV32I-NEXT: addi a0, a0, -32
; RV32I-NEXT: slti a2, a0, 0
; RV32I-NEXT: neg a0, a2
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: addi a2, a2, -1
; RV32I-NEXT: and a1, a2, a1
; RV32I-NEXT: addi a1, a0, -32
; RV32I-NEXT: slti a1, a1, 0
; RV32I-NEXT: neg a2, a1
; RV32I-NEXT: li a3, 1
; RV32I-NEXT: sll a3, a3, a0
; RV32I-NEXT: and a0, a2, a3
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: and a1, a1, a3
; RV32I-NEXT: ret
;
; RV32ZBS-LABEL: bset_i64_zero:
16 changes: 6 additions & 10 deletions llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
@@ -489,7 +489,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: snez a1, s0
; RV64I-NEXT: addi a1, a1, -1
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: or a0, a1, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
@@ -513,7 +513,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
; RV64XTHEADBB-NEXT: add a0, a1, a0
; RV64XTHEADBB-NEXT: lbu a0, 0(a0)
; RV64XTHEADBB-NEXT: snez a1, s0
; RV64XTHEADBB-NEXT: addi a1, a1, -1
; RV64XTHEADBB-NEXT: addiw a1, a1, -1
; RV64XTHEADBB-NEXT: or a0, a1, a0
; RV64XTHEADBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64XTHEADBB-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
@@ -542,12 +542,10 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
; RV64I-NEXT: addi a1, a1, %lo(.LCPI9_0)
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: addi a0, a0, 1
; RV64I-NEXT: addiw a0, a0, 1
; RV64I-NEXT: seqz a1, s0
; RV64I-NEXT: addi a1, a1, -1
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: and a0, a1, a0
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -569,12 +567,10 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
; RV64XTHEADBB-NEXT: addi a1, a1, %lo(.LCPI9_0)
; RV64XTHEADBB-NEXT: add a0, a1, a0
; RV64XTHEADBB-NEXT: lbu a0, 0(a0)
; RV64XTHEADBB-NEXT: addi a0, a0, 1
; RV64XTHEADBB-NEXT: addiw a0, a0, 1
; RV64XTHEADBB-NEXT: seqz a1, s0
; RV64XTHEADBB-NEXT: addi a1, a1, -1
; RV64XTHEADBB-NEXT: addiw a1, a1, -1
; RV64XTHEADBB-NEXT: and a0, a1, a0
; RV64XTHEADBB-NEXT: slli a0, a0, 32
; RV64XTHEADBB-NEXT: srli a0, a0, 32
; RV64XTHEADBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64XTHEADBB-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64XTHEADBB-NEXT: addi sp, sp, 16
13 changes: 5 additions & 8 deletions llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
@@ -444,7 +444,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: snez a1, s0
; RV64I-NEXT: addi a1, a1, -1
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: or a0, a1, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
@@ -481,12 +481,10 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
; RV64I-NEXT: addi a1, a1, %lo(.LCPI9_0)
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: addi a0, a0, 1
; RV64I-NEXT: addiw a0, a0, 1
; RV64I-NEXT: seqz a1, s0
; RV64I-NEXT: addi a1, a1, -1
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: and a0, a1, a0
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -495,11 +493,10 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
; RV64ZBB-LABEL: ffs_i32:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: ctzw a1, a0
; RV64ZBB-NEXT: addi a1, a1, 1
; RV64ZBB-NEXT: addiw a1, a1, 1
; RV64ZBB-NEXT: seqz a0, a0
; RV64ZBB-NEXT: addi a0, a0, -1
; RV64ZBB-NEXT: addiw a0, a0, -1
; RV64ZBB-NEXT: and a0, a0, a1
; RV64ZBB-NEXT: zext.h a0, a0
; RV64ZBB-NEXT: ret
%1 = call i32 @llvm.cttz.i32(i32 %a, i1 true)
%2 = add i32 %1, 1
617 changes: 306 additions & 311 deletions llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll

Large diffs are not rendered by default.

26 changes: 13 additions & 13 deletions llvm/test/CodeGen/RISCV/rvv/vec3-setcc-crash.ll
@@ -13,18 +13,18 @@ define void @vec3_setcc_crash(ptr %in, ptr %out) {
; RV32: # %bb.0:
; RV32-NEXT: lw a0, 0(a0)
; RV32-NEXT: srli a2, a0, 16
; RV32-NEXT: srli a3, a0, 8
; RV32-NEXT: slli a4, a0, 16
; RV32-NEXT: srai a4, a4, 24
; RV32-NEXT: slli a3, a0, 16
; RV32-NEXT: srli a4, a3, 24
; RV32-NEXT: srai a3, a3, 24
; RV32-NEXT: slli a5, a0, 24
; RV32-NEXT: srai a5, a5, 24
; RV32-NEXT: slli a6, a0, 8
; RV32-NEXT: srai a6, a6, 24
; RV32-NEXT: sgtz a6, a6
; RV32-NEXT: sgtz a5, a5
; RV32-NEXT: sgtz a4, a4
; RV32-NEXT: neg a4, a4
; RV32-NEXT: and a3, a4, a3
; RV32-NEXT: sgtz a3, a3
; RV32-NEXT: neg a3, a3
; RV32-NEXT: and a3, a3, a4
; RV32-NEXT: slli a3, a3, 8
; RV32-NEXT: neg a4, a5
; RV32-NEXT: and a0, a4, a0
@@ -39,19 +39,19 @@ define void @vec3_setcc_crash(ptr %in, ptr %out) {
; RV64-LABEL: vec3_setcc_crash:
; RV64: # %bb.0:
; RV64-NEXT: lw a0, 0(a0)
; RV64-NEXT: srli a2, a0, 16
; RV64-NEXT: srli a3, a0, 8
; RV64-NEXT: slli a4, a0, 48
; RV64-NEXT: srai a4, a4, 56
; RV64-NEXT: srliw a2, a0, 16
; RV64-NEXT: slli a3, a0, 48
; RV64-NEXT: srli a4, a3, 56
; RV64-NEXT: srai a3, a3, 56
; RV64-NEXT: slli a5, a0, 56
; RV64-NEXT: srai a5, a5, 56
; RV64-NEXT: slli a6, a0, 40
; RV64-NEXT: srai a6, a6, 56
; RV64-NEXT: sgtz a6, a6
; RV64-NEXT: sgtz a5, a5
; RV64-NEXT: sgtz a4, a4
; RV64-NEXT: negw a4, a4
; RV64-NEXT: and a3, a4, a3
; RV64-NEXT: sgtz a3, a3
; RV64-NEXT: negw a3, a3
; RV64-NEXT: and a3, a3, a4
; RV64-NEXT: slli a3, a3, 8
; RV64-NEXT: negw a4, a5
; RV64-NEXT: and a0, a4, a0
9 changes: 6 additions & 3 deletions llvm/test/CodeGen/RISCV/signed-truncation-check.ll
@@ -422,7 +422,8 @@ define i1 @add_ugecmp_i64_i16(i64 %x) nounwind {
; RV32I-NEXT: lui a1, 1048560
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: sltu a1, a1, a2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: ret
;
@@ -462,7 +463,8 @@ define i1 @add_ugecmp_i64_i8(i64 %x) nounwind {
; RV32I-NEXT: addi a2, a0, -128
; RV32I-NEXT: sltu a0, a2, a0
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: sltiu a1, a2, -256
; RV32I-NEXT: xori a1, a1, 1
; RV32I-NEXT: and a0, a0, a1
@@ -691,7 +693,8 @@ define i1 @add_ultcmp_i64_i8(i64 %x) nounwind {
; RV32I-NEXT: addi a2, a0, 128
; RV32I-NEXT: sltu a0, a2, a0
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: sltiu a1, a2, 256
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: ret
127 changes: 127 additions & 0 deletions llvm/test/CodeGen/X86/GlobalISel/legalize-sdiv.mir
@@ -0,0 +1,127 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,X64
# RUN: llc -mtriple=i686-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,X86

...
---
name: test_sdiv_i8
tracksRegLiveness: true
body: |
bb.1:
liveins: $edi, $esi
; CHECK-LABEL: name: test_sdiv_i8
; CHECK: liveins: $edi, $esi
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[SDIV:%[0-9]+]]:_(s8) = G_SDIV [[TRUNC]], [[TRUNC1]]
; CHECK-NEXT: $al = COPY [[SDIV]](s8)
; CHECK-NEXT: RET 0, implicit $al
%2:_(s32) = COPY $edi
%0:_(s8) = G_TRUNC %2(s32)
%3:_(s32) = COPY $esi
%1:_(s8) = G_TRUNC %3(s32)
%4:_(s8) = G_SDIV %0, %1
$al = COPY %4(s8)
RET 0, implicit $al
...
---
name: test_sdiv_i16
tracksRegLiveness: true
body: |
bb.1:
liveins: $edi, $esi
; CHECK-LABEL: name: test_sdiv_i16
; CHECK: liveins: $edi, $esi
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[SDIV:%[0-9]+]]:_(s16) = G_SDIV [[TRUNC]], [[TRUNC1]]
; CHECK-NEXT: $ax = COPY [[SDIV]](s16)
; CHECK-NEXT: RET 0, implicit $ax
%2:_(s32) = COPY $edi
%0:_(s16) = G_TRUNC %2(s32)
%3:_(s32) = COPY $esi
%1:_(s16) = G_TRUNC %3(s32)
%4:_(s16) = G_SDIV %0, %1
$ax = COPY %4(s16)
RET 0, implicit $ax
...
---
name: test_sdiv_i32
tracksRegLiveness: true
body: |
bb.1:
liveins: $edi, $esi
; CHECK-LABEL: name: test_sdiv_i32
; CHECK: liveins: $edi, $esi
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK-NEXT: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[COPY]], [[COPY1]]
; CHECK-NEXT: $eax = COPY [[SDIV]](s32)
; CHECK-NEXT: RET 0, implicit $eax
%0:_(s32) = COPY $edi
%1:_(s32) = COPY $esi
%2:_(s32) = G_SDIV %0, %1
$eax = COPY %2(s32)
RET 0, implicit $eax
...
---
name: test_sdiv_i64
tracksRegLiveness: true
body: |
bb.1:
; X64-LABEL: name: test_sdiv_i64
; X64: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X64-NEXT: [[DEF1:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X64-NEXT: [[SDIV:%[0-9]+]]:_(s64) = G_SDIV [[DEF]], [[DEF1]]
; X64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[SDIV]](s64)
; X64-NEXT: RET 0, implicit [[COPY]](s64)
;
; X86-LABEL: name: test_sdiv_i64
; X86: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X86-NEXT: [[DEF1:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X86-NEXT: ADJCALLSTACKDOWN32 16, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](s64)
; X86-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X86-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X86-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
; X86-NEXT: G_STORE [[UV]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack, align 1)
; X86-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X86-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; X86-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s32)
; X86-NEXT: G_STORE [[UV1]](s32), [[PTR_ADD1]](p0) :: (store (s32) into stack + 4, align 1)
; X86-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF1]](s64)
; X86-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $esp
; X86-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; X86-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s32)
; X86-NEXT: G_STORE [[UV2]](s32), [[PTR_ADD2]](p0) :: (store (s32) into stack + 8, align 1)
; X86-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $esp
; X86-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; X86-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY3]], [[C3]](s32)
; X86-NEXT: G_STORE [[UV3]](s32), [[PTR_ADD3]](p0) :: (store (s32) into stack + 12, align 1)
; X86-NEXT: CALLpcrel32 &__divdi3, csr_32, implicit $esp, implicit $ssp, implicit-def $eax, implicit-def $edx
; X86-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $eax
; X86-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $edx
; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
; X86-NEXT: ADJCALLSTACKUP32 16, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X86-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; X86-NEXT: RET 0, implicit [[COPY6]](s64)
%0:_(s64) = IMPLICIT_DEF
%1:_(s64) = IMPLICIT_DEF
%2:_(s64) = G_SDIV %0, %1
%3:_(s64) = COPY %2(s64)
RET 0, implicit %3
...
127 changes: 127 additions & 0 deletions llvm/test/CodeGen/X86/GlobalISel/legalize-srem.mir
@@ -0,0 +1,127 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,X64
# RUN: llc -mtriple=i686-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,X86

...
---
name: test_srem_i8
tracksRegLiveness: true
body: |
bb.1:
liveins: $edi, $esi
; CHECK-LABEL: name: test_srem_i8
; CHECK: liveins: $edi, $esi
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[SREM:%[0-9]+]]:_(s8) = G_SREM [[TRUNC]], [[TRUNC1]]
; CHECK-NEXT: $al = COPY [[SREM]](s8)
; CHECK-NEXT: RET 0, implicit $al
%2:_(s32) = COPY $edi
%0:_(s8) = G_TRUNC %2(s32)
%3:_(s32) = COPY $esi
%1:_(s8) = G_TRUNC %3(s32)
%4:_(s8) = G_SREM %0, %1
$al = COPY %4(s8)
RET 0, implicit $al
...
---
name: test_srem_i16
tracksRegLiveness: true
body: |
bb.1:
liveins: $edi, $esi
; CHECK-LABEL: name: test_srem_i16
; CHECK: liveins: $edi, $esi
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[SREM:%[0-9]+]]:_(s16) = G_SREM [[TRUNC]], [[TRUNC1]]
; CHECK-NEXT: $ax = COPY [[SREM]](s16)
; CHECK-NEXT: RET 0, implicit $ax
%2:_(s32) = COPY $edi
%0:_(s16) = G_TRUNC %2(s32)
%3:_(s32) = COPY $esi
%1:_(s16) = G_TRUNC %3(s32)
%4:_(s16) = G_SREM %0, %1
$ax = COPY %4(s16)
RET 0, implicit $ax
...
---
name: test_srem_i32
tracksRegLiveness: true
body: |
bb.1:
liveins: $edi, $esi
; CHECK-LABEL: name: test_srem_i32
; CHECK: liveins: $edi, $esi
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK-NEXT: [[SREM:%[0-9]+]]:_(s32) = G_SREM [[COPY]], [[COPY1]]
; CHECK-NEXT: $eax = COPY [[SREM]](s32)
; CHECK-NEXT: RET 0, implicit $eax
%0:_(s32) = COPY $edi
%1:_(s32) = COPY $esi
%2:_(s32) = G_SREM %0, %1
$eax = COPY %2(s32)
RET 0, implicit $eax
...
---
name: test_srem_i64
tracksRegLiveness: true
body: |
bb.1:
; X64-LABEL: name: test_srem_i64
; X64: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X64-NEXT: [[DEF1:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X64-NEXT: [[SREM:%[0-9]+]]:_(s64) = G_SREM [[DEF]], [[DEF1]]
; X64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[SREM]](s64)
; X64-NEXT: RET 0, implicit [[COPY]](s64)
;
; X86-LABEL: name: test_srem_i64
; X86: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X86-NEXT: [[DEF1:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X86-NEXT: ADJCALLSTACKDOWN32 16, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](s64)
; X86-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X86-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X86-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
; X86-NEXT: G_STORE [[UV]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack, align 1)
; X86-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X86-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; X86-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s32)
; X86-NEXT: G_STORE [[UV1]](s32), [[PTR_ADD1]](p0) :: (store (s32) into stack + 4, align 1)
; X86-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF1]](s64)
; X86-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $esp
; X86-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; X86-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s32)
; X86-NEXT: G_STORE [[UV2]](s32), [[PTR_ADD2]](p0) :: (store (s32) into stack + 8, align 1)
; X86-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $esp
; X86-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; X86-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY3]], [[C3]](s32)
; X86-NEXT: G_STORE [[UV3]](s32), [[PTR_ADD3]](p0) :: (store (s32) into stack + 12, align 1)
; X86-NEXT: CALLpcrel32 &__moddi3, csr_32, implicit $esp, implicit $ssp, implicit-def $eax, implicit-def $edx
; X86-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $eax
; X86-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $edx
; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
; X86-NEXT: ADJCALLSTACKUP32 16, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X86-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; X86-NEXT: RET 0, implicit [[COPY6]](s64)
%0:_(s64) = IMPLICIT_DEF
%1:_(s64) = IMPLICIT_DEF
%2:_(s64) = G_SREM %0, %1
%3:_(s64) = COPY %2(s64)
RET 0, implicit %3
...
127 changes: 127 additions & 0 deletions llvm/test/CodeGen/X86/GlobalISel/legalize-udiv.mir
@@ -0,0 +1,127 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,X64
# RUN: llc -mtriple=i686-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,X86

...
---
name: test_udiv_i8
tracksRegLiveness: true
body: |
bb.1:
liveins: $edi, $esi
; CHECK-LABEL: name: test_udiv_i8
; CHECK: liveins: $edi, $esi
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[UDIV:%[0-9]+]]:_(s8) = G_UDIV [[TRUNC]], [[TRUNC1]]
; CHECK-NEXT: $al = COPY [[UDIV]](s8)
; CHECK-NEXT: RET 0, implicit $al
%2:_(s32) = COPY $edi
%0:_(s8) = G_TRUNC %2(s32)
%3:_(s32) = COPY $esi
%1:_(s8) = G_TRUNC %3(s32)
%4:_(s8) = G_UDIV %0, %1
$al = COPY %4(s8)
RET 0, implicit $al
...
---
name: test_udiv_i16
tracksRegLiveness: true
body: |
bb.1:
liveins: $edi, $esi
; CHECK-LABEL: name: test_udiv_i16
; CHECK: liveins: $edi, $esi
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[UDIV:%[0-9]+]]:_(s16) = G_UDIV [[TRUNC]], [[TRUNC1]]
; CHECK-NEXT: $ax = COPY [[UDIV]](s16)
; CHECK-NEXT: RET 0, implicit $ax
%2:_(s32) = COPY $edi
%0:_(s16) = G_TRUNC %2(s32)
%3:_(s32) = COPY $esi
%1:_(s16) = G_TRUNC %3(s32)
%4:_(s16) = G_UDIV %0, %1
$ax = COPY %4(s16)
RET 0, implicit $ax
...
---
name: test_udiv_i32
tracksRegLiveness: true
body: |
bb.1:
liveins: $edi, $esi
; CHECK-LABEL: name: test_udiv_i32
; CHECK: liveins: $edi, $esi
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK-NEXT: [[UDIV:%[0-9]+]]:_(s32) = G_UDIV [[COPY]], [[COPY1]]
; CHECK-NEXT: $eax = COPY [[UDIV]](s32)
; CHECK-NEXT: RET 0, implicit $eax
%0:_(s32) = COPY $edi
%1:_(s32) = COPY $esi
%2:_(s32) = G_UDIV %0, %1
$eax = COPY %2(s32)
RET 0, implicit $eax
...
---
name: test_udiv_i64
tracksRegLiveness: true
body: |
bb.1:
; X64-LABEL: name: test_udiv_i64
; X64: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X64-NEXT: [[DEF1:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X64-NEXT: [[UDIV:%[0-9]+]]:_(s64) = G_UDIV [[DEF]], [[DEF1]]
; X64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[UDIV]](s64)
; X64-NEXT: RET 0, implicit [[COPY]](s64)
;
; X86-LABEL: name: test_udiv_i64
; X86: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X86-NEXT: [[DEF1:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X86-NEXT: ADJCALLSTACKDOWN32 16, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](s64)
; X86-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X86-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X86-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
; X86-NEXT: G_STORE [[UV]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack, align 1)
; X86-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X86-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; X86-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s32)
; X86-NEXT: G_STORE [[UV1]](s32), [[PTR_ADD1]](p0) :: (store (s32) into stack + 4, align 1)
; X86-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF1]](s64)
; X86-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $esp
; X86-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; X86-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s32)
; X86-NEXT: G_STORE [[UV2]](s32), [[PTR_ADD2]](p0) :: (store (s32) into stack + 8, align 1)
; X86-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $esp
; X86-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; X86-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY3]], [[C3]](s32)
; X86-NEXT: G_STORE [[UV3]](s32), [[PTR_ADD3]](p0) :: (store (s32) into stack + 12, align 1)
; X86-NEXT: CALLpcrel32 &__udivdi3, csr_32, implicit $esp, implicit $ssp, implicit-def $eax, implicit-def $edx
; X86-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $eax
; X86-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $edx
; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
; X86-NEXT: ADJCALLSTACKUP32 16, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X86-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; X86-NEXT: RET 0, implicit [[COPY6]](s64)
%0:_(s64) = IMPLICIT_DEF
%1:_(s64) = IMPLICIT_DEF
%2:_(s64) = G_UDIV %0, %1
%3:_(s64) = COPY %2(s64)
RET 0, implicit %3
...
127 changes: 127 additions & 0 deletions llvm/test/CodeGen/X86/GlobalISel/legalize-urem.mir
@@ -0,0 +1,127 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,X64
# RUN: llc -mtriple=i686-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,X86

...
---
name: test_urem_i8
tracksRegLiveness: true
body: |
bb.1:
liveins: $edi, $esi
; CHECK-LABEL: name: test_urem_i8
; CHECK: liveins: $edi, $esi
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[UREM:%[0-9]+]]:_(s8) = G_UREM [[TRUNC]], [[TRUNC1]]
; CHECK-NEXT: $al = COPY [[UREM]](s8)
; CHECK-NEXT: RET 0, implicit $al
%2:_(s32) = COPY $edi
%0:_(s8) = G_TRUNC %2(s32)
%3:_(s32) = COPY $esi
%1:_(s8) = G_TRUNC %3(s32)
%4:_(s8) = G_UREM %0, %1
$al = COPY %4(s8)
RET 0, implicit $al
...
---
name: test_urem_i16
tracksRegLiveness: true
body: |
bb.1:
liveins: $edi, $esi
; CHECK-LABEL: name: test_urem_i16
; CHECK: liveins: $edi, $esi
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[UREM:%[0-9]+]]:_(s16) = G_UREM [[TRUNC]], [[TRUNC1]]
; CHECK-NEXT: $ax = COPY [[UREM]](s16)
; CHECK-NEXT: RET 0, implicit $ax
%2:_(s32) = COPY $edi
%0:_(s16) = G_TRUNC %2(s32)
%3:_(s32) = COPY $esi
%1:_(s16) = G_TRUNC %3(s32)
%4:_(s16) = G_UREM %0, %1
$ax = COPY %4(s16)
RET 0, implicit $ax
...
---
name: test_urem_i32
tracksRegLiveness: true
body: |
bb.1:
liveins: $edi, $esi
; CHECK-LABEL: name: test_urem_i32
; CHECK: liveins: $edi, $esi
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK-NEXT: [[UREM:%[0-9]+]]:_(s32) = G_UREM [[COPY]], [[COPY1]]
; CHECK-NEXT: $eax = COPY [[UREM]](s32)
; CHECK-NEXT: RET 0, implicit $eax
%0:_(s32) = COPY $edi
%1:_(s32) = COPY $esi
%2:_(s32) = G_UREM %0, %1
$eax = COPY %2(s32)
RET 0, implicit $eax
...
---
name: test_urem_i64
tracksRegLiveness: true
body: |
bb.1:
; X64-LABEL: name: test_urem_i64
; X64: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X64-NEXT: [[DEF1:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X64-NEXT: [[UREM:%[0-9]+]]:_(s64) = G_UREM [[DEF]], [[DEF1]]
; X64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[UREM]](s64)
; X64-NEXT: RET 0, implicit [[COPY]](s64)
;
; X86-LABEL: name: test_urem_i64
; X86: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X86-NEXT: [[DEF1:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X86-NEXT: ADJCALLSTACKDOWN32 16, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](s64)
; X86-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X86-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X86-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
; X86-NEXT: G_STORE [[UV]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack, align 1)
; X86-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X86-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; X86-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s32)
; X86-NEXT: G_STORE [[UV1]](s32), [[PTR_ADD1]](p0) :: (store (s32) into stack + 4, align 1)
; X86-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF1]](s64)
; X86-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $esp
; X86-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; X86-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s32)
; X86-NEXT: G_STORE [[UV2]](s32), [[PTR_ADD2]](p0) :: (store (s32) into stack + 8, align 1)
; X86-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $esp
; X86-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; X86-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY3]], [[C3]](s32)
; X86-NEXT: G_STORE [[UV3]](s32), [[PTR_ADD3]](p0) :: (store (s32) into stack + 12, align 1)
; X86-NEXT: CALLpcrel32 &__umoddi3, csr_32, implicit $esp, implicit $ssp, implicit-def $eax, implicit-def $edx
; X86-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $eax
; X86-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $edx
; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
; X86-NEXT: ADJCALLSTACKUP32 16, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X86-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; X86-NEXT: RET 0, implicit [[COPY6]](s64)
%0:_(s64) = IMPLICIT_DEF
%1:_(s64) = IMPLICIT_DEF
%2:_(s64) = G_UREM %0, %1
%3:_(s64) = COPY %2(s64)
RET 0, implicit %3
...
114 changes: 0 additions & 114 deletions llvm/test/CodeGen/X86/GlobalISel/x86-legalize-sdiv.mir

This file was deleted.

211 changes: 0 additions & 211 deletions llvm/test/CodeGen/X86/GlobalISel/x86-legalize-srem.mir

This file was deleted.

195 changes: 0 additions & 195 deletions llvm/test/CodeGen/X86/GlobalISel/x86-legalize-udiv.mir

This file was deleted.

211 changes: 0 additions & 211 deletions llvm/test/CodeGen/X86/GlobalISel/x86-legalize-urem.mir

This file was deleted.

130 changes: 0 additions & 130 deletions llvm/test/CodeGen/X86/GlobalISel/x86-select-sdiv.mir

This file was deleted.

213 changes: 0 additions & 213 deletions llvm/test/CodeGen/X86/GlobalISel/x86-select-srem.mir

This file was deleted.
