[FPEnv][RISCV] Correct strictfp tests.
Correct RISC-V strictfp tests to follow the rules documented in the LangRef:
https://llvm.org/docs/LangRef.html#constrained-floating-point-intrinsics

Mostly these tests just needed the strictfp attribute added to their function
definitions. I've also removed the strictfp attribute from calls to the
constrained intrinsics, since it is implied by default as of D154991, but only
in tests I was already changing.

Test changes verified with D146845.
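For reference, a minimal sketch of the before/after pattern (an illustrative
example, not taken from any file in this commit):

; Before: strictfp is missing from the function definition and is spelled
; redundantly on the constrained-intrinsic call site.
define double @before(i32 %a) nounwind {
  %r = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %r
}

; After: strictfp is carried by the function definition, as the LangRef
; requires, and the call site drops the attribute because constrained
; intrinsics get it by default since D154991.
define double @after(i32 %a) nounwind strictfp {
  %r = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %r
}

declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)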
kpneal committed Jul 20, 2023
1 parent 40340cf commit 95c2d01
Showing 43 changed files with 1,381 additions and 1,381 deletions.
40 changes: 20 additions & 20 deletions llvm/test/CodeGen/RISCV/double-convert-strict.ll
@@ -147,7 +147,7 @@ define i32 @fcvt_w_d(double %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict") strictfp
%1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict")
ret i32 %1
}
declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
@@ -193,14 +193,14 @@ define i32 @fcvt_wu_d(double %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict") strictfp
%1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict")
ret i32 %1
}
declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)

; Test where the fptoui has multiple uses, one of which causes a sext to be
; inserted on RV64.
define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind {
define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_wu_d_multiple_use:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rtz
@@ -249,7 +249,7 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%a = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x, metadata !"fpexcept.strict") strictfp
%a = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x, metadata !"fpexcept.strict")
%b = icmp eq i32 %a, 0
%c = select i1 %b, i32 1, i32 %a
ret i32 %c
@@ -295,7 +295,7 @@ define double @fcvt_d_w(i32 %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %1
}
declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
@@ -345,7 +345,7 @@ define double @fcvt_d_w_load(ptr %p) nounwind strictfp {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%a = load i32, ptr %p
%1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %1
}

@@ -389,7 +389,7 @@ define double @fcvt_d_wu(i32 %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %1
}
declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
@@ -445,7 +445,7 @@ define double @fcvt_d_wu_load(ptr %p) nounwind strictfp {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%a = load i32, ptr %p
%1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %1
}

@@ -495,7 +495,7 @@ define i64 @fcvt_l_d(double %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %a, metadata !"fpexcept.strict") strictfp
%1 = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %a, metadata !"fpexcept.strict")
ret i64 %1
}
declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
@@ -546,7 +546,7 @@ define i64 @fcvt_lu_d(double %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %a, metadata !"fpexcept.strict") strictfp
%1 = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %a, metadata !"fpexcept.strict")
ret i64 %1
}
declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
@@ -597,7 +597,7 @@ define double @fcvt_d_l(i64 %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %1
}
declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
@@ -648,7 +648,7 @@ define double @fcvt_d_lu(i64 %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %1
}
declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
@@ -692,7 +692,7 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call double @llvm.experimental.constrained.sitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call double @llvm.experimental.constrained.sitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %1
}
declare double @llvm.experimental.constrained.sitofp.f64.i8(i8, metadata, metadata)
@@ -736,7 +736,7 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call double @llvm.experimental.constrained.uitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call double @llvm.experimental.constrained.uitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %1
}
declare double @llvm.experimental.constrained.uitofp.f64.i8(i8, metadata, metadata)
@@ -780,7 +780,7 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call double @llvm.experimental.constrained.sitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call double @llvm.experimental.constrained.sitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %1
}
declare double @llvm.experimental.constrained.sitofp.f64.i16(i16, metadata, metadata)
@@ -824,13 +824,13 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call double @llvm.experimental.constrained.uitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call double @llvm.experimental.constrained.uitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret double %1
}
declare double @llvm.experimental.constrained.uitofp.f64.i16(i16, metadata, metadata)

; Make sure we select W version of addi on RV64.
define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
; RV32IFD-LABEL: fcvt_d_w_demanded_bits:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi a0, a0, 1
@@ -899,13 +899,13 @@ define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
%3 = add i32 %0, 1
%4 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%4 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
store double %4, ptr %1, align 8
ret i32 %3
}

; Make sure we select W version of addi on RV64.
define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
; RV32IFD-LABEL: fcvt_d_wu_demanded_bits:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi a0, a0, 1
@@ -972,7 +972,7 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
%3 = add i32 %0, 1
%4 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%4 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
store double %4, ptr %1, align 8
ret i32 %3
}
38 changes: 19 additions & 19 deletions llvm/test/CodeGen/RISCV/float-convert-strict.ll
@@ -48,7 +48,7 @@ define i32 @fcvt_w_s(float %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.strict") strictfp
%1 = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.strict")
ret i32 %1
}
declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
@@ -81,14 +81,14 @@ define i32 @fcvt_wu_s(float %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %a, metadata !"fpexcept.strict") strictfp
%1 = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %a, metadata !"fpexcept.strict")
ret i32 %1
}
declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)

; Test where the fptoui has multiple uses, one of which causes a sext to be
; inserted on RV64.
define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind {
define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind strictfp {
; CHECKIF-LABEL: fcvt_wu_s_multiple_use:
; CHECKIF: # %bb.0:
; CHECKIF-NEXT: fcvt.wu.s a0, fa0, rtz
@@ -124,7 +124,7 @@ define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%a = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x, metadata !"fpexcept.strict") strictfp
%a = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x, metadata !"fpexcept.strict")
%b = icmp eq i32 %a, 0
%c = select i1 %b, i32 1, i32 %a
ret i32 %c
@@ -159,7 +159,7 @@ define float @fcvt_s_w(i32 %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret float %1
}
declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
@@ -197,7 +197,7 @@ define float @fcvt_s_w_load(ptr %p) nounwind strictfp {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%a = load i32, ptr %p
%1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret float %1
}

@@ -230,7 +230,7 @@ define float @fcvt_s_wu(i32 %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret float %1
}
declare float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata, metadata)
@@ -280,7 +280,7 @@ define float @fcvt_s_wu_load(ptr %p) nounwind strictfp {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%a = load i32, ptr %p
%1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret float %1
}

@@ -330,7 +330,7 @@ define i64 @fcvt_l_s(float %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %a, metadata !"fpexcept.strict") strictfp
%1 = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %a, metadata !"fpexcept.strict")
ret i64 %1
}
declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)
@@ -381,7 +381,7 @@ define i64 @fcvt_lu_s(float %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %a, metadata !"fpexcept.strict") strictfp
%1 = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %a, metadata !"fpexcept.strict")
ret i64 %1
}
declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)
@@ -432,7 +432,7 @@ define float @fcvt_s_l(i64 %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret float %1
}
declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
@@ -483,7 +483,7 @@ define float @fcvt_s_lu(i64 %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret float %1
}
declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
@@ -516,7 +516,7 @@ define float @fcvt_s_w_i8(i8 signext %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call float @llvm.experimental.constrained.sitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call float @llvm.experimental.constrained.sitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret float %1
}
declare float @llvm.experimental.constrained.sitofp.f32.i8(i8, metadata, metadata)
@@ -582,7 +582,7 @@ define float @fcvt_s_w_i16(i16 signext %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret float %1
}
declare float @llvm.experimental.constrained.sitofp.f32.i16(i16, metadata, metadata)
@@ -615,13 +615,13 @@ define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
%1 = call float @llvm.experimental.constrained.uitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%1 = call float @llvm.experimental.constrained.uitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret float %1
}
declare float @llvm.experimental.constrained.uitofp.f32.i16(i16, metadata, metadata)

; Make sure we select W version of addi on RV64.
define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_w_demanded_bits:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi a0, a0, 1
@@ -688,13 +688,13 @@ define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
%3 = add i32 %0, 1
%4 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%4 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
store float %4, ptr %1, align 4
ret i32 %3
}

; Make sure we select W version of addi on RV64.
define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_wu_demanded_bits:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi a0, a0, 1
@@ -759,7 +759,7 @@ define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
%3 = add i32 %0, 1
%4 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
%4 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
store float %4, ptr %1, align 4
ret i32 %3
}
