[RISCV] Remove hasSideEffects=1 for vsetvli pseudos (#91319)
In a similar vein to #90049, we currently model all of the effects of a
vsetvli pseudo:

* VL and VTYPE are marked as defs
* VL-preserving x0,x0 vsetvlis don't get emitted until
RISCVInsertVSETVLI, and when they are they have implicit uses on VL
* Regular vector pseudos are fully modelled too: Before
RISCVInsertVSETVLI they can be moved between vsetvli pseudos because we
will eventually insert vsetvlis to correct VL and VTYPE. Afterwards,
they will have implicit uses on VL and VTYPE.

Since we model everything, we can remove hasSideEffects=1. This gives us
some improvements like sinking in vsetvli-insert-crossbb.ll.

We need to update RISCVDeadRegisterDefinitions to keep handling vsetvli
pseudos since it only operates on instructions with unmodelled side
effects.
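
To illustrate the RISCVDeadRegisterDefinitions change, here is a minimal standalone sketch of the updated guard. It paraphrases the first hunk below; the helper name, the standalone packaging, and the exact header paths are assumptions for illustration, not part of the patch.

    // Sketch of the updated filter: vsetvli pseudos stay eligible for
    // dead-definition rewriting even though they no longer set hasSideEffects.
    #include "MCTargetDesc/RISCVMCTargetDesc.h" // RISCV::PseudoVSETVLI / PseudoVSETIVLI (path approximate)
    #include "llvm/CodeGen/MachineInstr.h"

    using namespace llvm;

    static bool isDeadDefRewriteCandidate(const MachineInstr &MI) {
      const MCInstrDesc &Desc = MI.getDesc();
      // Only non-computational instructions are handled, because rewriting Rd
      // to X0 on computational instructions could create reserved HINT encodings.
      if (!Desc.mayLoad() && !Desc.mayStore() &&
          !Desc.hasUnmodeledSideEffects() &&
          MI.getOpcode() != RISCV::PseudoVSETVLI &&
          MI.getOpcode() != RISCV::PseudoVSETIVLI)
        return false; // the pass's per-instruction loop does continue; here
      return true;
    }

In the patch itself this logic sits inline as a continue guard near the top of the per-instruction loop in runOnMachineFunction, as the first hunk below shows.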
lukel97 committed May 15, 2024
1 parent 8e00703 commit ff313ee
Showing 36 changed files with 1,107 additions and 1,166 deletions.
llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp (4 changes: 3 additions & 1 deletion)
@@ -72,7 +72,9 @@ bool RISCVDeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) {
// are reserved for HINT instructions.
const MCInstrDesc &Desc = MI.getDesc();
if (!Desc.mayLoad() && !Desc.mayStore() &&
!Desc.hasUnmodeledSideEffects())
!Desc.hasUnmodeledSideEffects() &&
MI.getOpcode() != RISCV::PseudoVSETVLI &&
MI.getOpcode() != RISCV::PseudoVSETIVLI)
continue;
// For PseudoVSETVLIX0, Rd = X0 has special meaning.
if (MI.getOpcode() == RISCV::PseudoVSETVLIX0)
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td (2 changes: 1 addition & 1 deletion)
@@ -6181,7 +6181,7 @@ let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 0,
//===----------------------------------------------------------------------===//

// Pseudos.
let hasSideEffects = 1, mayLoad = 0, mayStore = 0, Defs = [VL, VTYPE] in {
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Defs = [VL, VTYPE] in {
// Due to rs1=X0 having special meaning, we need a GPRNoX0 register class for
// the when we aren't using one of the special X0 encodings. Otherwise it could
// be accidentally be made X0 by MachineIR optimizations. To satisfy the
llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll (38 changes: 25 additions & 13 deletions)
@@ -236,41 +236,53 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_param_nxv32i32_nxv32i32_nxv32i32
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: li a3, 24
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a3, a2, a1
; CHECK-NEXT: vl8re32.v v8, (a3)
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: vl8re32.v v0, (a0)
; CHECK-NEXT: vl8re32.v v8, (a1)
; CHECK-NEXT: vl8re32.v v16, (a2)
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vl8re32.v v0, (a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT: vl8re32.v v8, (a3)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vl8re32.v v16, (a2)
; CHECK-NEXT: vadd.vv v0, v24, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vadd.vv v8, v24, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vadd.vv v24, v24, v8
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vadd.vv v8, v8, v24
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vadd.vv v8, v24, v8
; CHECK-NEXT: vadd.vv v24, v0, v16
; CHECK-NEXT: vadd.vx v16, v8, a4
; CHECK-NEXT: vadd.vx v8, v24, a4
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
llvm/test/CodeGen/RISCV/rvv/calling-conv.ll (8 changes: 4 additions & 4 deletions)
@@ -39,11 +39,11 @@ define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x
; RV32-NEXT: vs8r.v v8, (a0)
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 3
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: vs8r.v v16, (a0)
; RV32-NEXT: add a1, a0, a1
; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.i v8, 0
; RV32-NEXT: addi a0, sp, 128
; RV32-NEXT: vs8r.v v16, (a1)
; RV32-NEXT: vmv.v.i v16, 0
; RV32-NEXT: call callee_scalable_vector_split_indirect
; RV32-NEXT: addi sp, s0, -144
@@ -70,11 +70,11 @@ define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x
; RV64-NEXT: vs8r.v v8, (a0)
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 3
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: vs8r.v v16, (a0)
; RV64-NEXT: add a1, a0, a1
; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; RV64-NEXT: vmv.v.i v8, 0
; RV64-NEXT: addi a0, sp, 128
; RV64-NEXT: vs8r.v v16, (a1)
; RV64-NEXT: vmv.v.i v16, 0
; RV64-NEXT: call callee_scalable_vector_split_indirect
; RV64-NEXT: addi sp, s0, -144
@@ -19,9 +19,9 @@ define <2 x half> @nearbyint_v2f16(<2 x half> %v) strictfp {
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: ret
%r = call <2 x half> @llvm.experimental.constrained.nearbyint.v2f16(<2 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <2 x half> %r
@@ -42,9 +42,9 @@ define <4 x half> @nearbyint_v4f16(<4 x half> %v) strictfp {
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: ret
%r = call <4 x half> @llvm.experimental.constrained.nearbyint.v4f16(<4 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <4 x half> %r
@@ -65,9 +65,9 @@ define <8 x half> @nearbyint_v8f16(<8 x half> %v) strictfp {
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: ret
%r = call <8 x half> @llvm.experimental.constrained.nearbyint.v8f16(<8 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <8 x half> %r
@@ -88,9 +88,9 @@ define <16 x half> @nearbyint_v16f16(<16 x half> %v) strictfp {
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: ret
%r = call <16 x half> @llvm.experimental.constrained.nearbyint.v16f16(<16 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <16 x half> %r
@@ -112,9 +112,9 @@ define <32 x half> @nearbyint_v32f16(<32 x half> %v) strictfp {
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: ret
%r = call <32 x half> @llvm.experimental.constrained.nearbyint.v32f16(<32 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <32 x half> %r
@@ -135,9 +135,9 @@ define <2 x float> @nearbyint_v2f32(<2 x float> %v) strictfp {
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: ret
%r = call <2 x float> @llvm.experimental.constrained.nearbyint.v2f32(<2 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <2 x float> %r
@@ -158,9 +158,9 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %v) strictfp {
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: ret
%r = call <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <4 x float> %r
@@ -181,9 +181,9 @@ define <8 x float> @nearbyint_v8f32(<8 x float> %v) strictfp {
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: ret
%r = call <8 x float> @llvm.experimental.constrained.nearbyint.v8f32(<8 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <8 x float> %r
@@ -204,9 +204,9 @@ define <16 x float> @nearbyint_v16f32(<16 x float> %v) strictfp {
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: ret
%r = call <16 x float> @llvm.experimental.constrained.nearbyint.v16f32(<16 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <16 x float> %r
@@ -227,9 +227,9 @@ define <2 x double> @nearbyint_v2f64(<2 x double> %v) strictfp {
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: ret
%r = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <2 x double> %r
@@ -250,9 +250,9 @@ define <4 x double> @nearbyint_v4f64(<4 x double> %v) strictfp {
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: ret
%r = call <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(<4 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <4 x double> %r
@@ -273,9 +273,9 @@ define <8 x double> @nearbyint_v8f64(<8 x double> %v) strictfp {
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: fsflags a0
; CHECK-NEXT: ret
%r = call <8 x double> @llvm.experimental.constrained.nearbyint.v8f64(<8 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <8 x double> %r
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll (24 changes: 12 additions & 12 deletions)
@@ -359,13 +359,13 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV32-NEXT: feq.d a0, fa3, fa3
; RV32-NEXT: fmax.d fa3, fa3, fa5
; RV32-NEXT: fmin.d fa3, fa3, fa4
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: fld fa2, 40(sp)
; RV32-NEXT: fcvt.w.d a2, fa3, rtz
; RV32-NEXT: fld fa3, 40(sp)
; RV32-NEXT: neg a0, a0
; RV32-NEXT: and a0, a0, a2
; RV32-NEXT: feq.d a2, fa2, fa2
; RV32-NEXT: fmax.d fa3, fa2, fa5
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: feq.d a2, fa3, fa3
; RV32-NEXT: fmax.d fa3, fa3, fa5
; RV32-NEXT: fmin.d fa3, fa3, fa4
; RV32-NEXT: fcvt.w.d a3, fa3, rtz
; RV32-NEXT: fld fa3, 32(sp)
@@ -460,13 +460,13 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV64-NEXT: feq.d a0, fa3, fa3
; RV64-NEXT: fmax.d fa3, fa3, fa5
; RV64-NEXT: fmin.d fa3, fa3, fa4
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: fld fa2, 40(sp)
; RV64-NEXT: fcvt.l.d a2, fa3, rtz
; RV64-NEXT: fld fa3, 40(sp)
; RV64-NEXT: neg a0, a0
; RV64-NEXT: and a0, a0, a2
; RV64-NEXT: feq.d a2, fa2, fa2
; RV64-NEXT: fmax.d fa3, fa2, fa5
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: feq.d a2, fa3, fa3
; RV64-NEXT: fmax.d fa3, fa3, fa5
; RV64-NEXT: fmin.d fa3, fa3, fa4
; RV64-NEXT: fcvt.l.d a3, fa3, rtz
; RV64-NEXT: fld fa3, 32(sp)
@@ -557,7 +557,6 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa4, v8
; RV32-NEXT: fmax.d fa4, fa4, fa3
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: fld fa2, 40(sp)
; RV32-NEXT: fmin.d fa4, fa4, fa5
; RV32-NEXT: fcvt.wu.d a0, fa4, rtz
@@ -566,9 +565,10 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
; RV32-NEXT: fmin.d fa2, fa2, fa5
; RV32-NEXT: fcvt.wu.d a2, fa2, rtz
; RV32-NEXT: fmax.d fa4, fa4, fa3
; RV32-NEXT: fld fa2, 48(sp)
; RV32-NEXT: fmin.d fa4, fa4, fa5
; RV32-NEXT: fld fa2, 48(sp)
; RV32-NEXT: fcvt.wu.d a3, fa4, rtz
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: vslide1down.vx v8, v10, a0
; RV32-NEXT: fmax.d fa4, fa2, fa3
; RV32-NEXT: fmin.d fa4, fa4, fa5
@@ -633,7 +633,6 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vfmv.f.s fa4, v8
; RV64-NEXT: fmax.d fa4, fa4, fa3
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: fld fa2, 40(sp)
; RV64-NEXT: fmin.d fa4, fa4, fa5
; RV64-NEXT: fcvt.lu.d a0, fa4, rtz
@@ -642,9 +641,10 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
; RV64-NEXT: fmin.d fa2, fa2, fa5
; RV64-NEXT: fcvt.lu.d a2, fa2, rtz
; RV64-NEXT: fmax.d fa4, fa4, fa3
; RV64-NEXT: fld fa2, 48(sp)
; RV64-NEXT: fmin.d fa4, fa4, fa5
; RV64-NEXT: fld fa2, 48(sp)
; RV64-NEXT: fcvt.lu.d a3, fa4, rtz
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: vslide1down.vx v8, v10, a0
; RV64-NEXT: fmax.d fa4, fa2, fa3
; RV64-NEXT: fmin.d fa4, fa4, fa5